forked from I2P_Developers/i2p.i2p
propagate from branch 'i2p.i2p' (head 5b24a07e8a843d03ea45e664c59b93937c5efc42)
to branch 'i2p.i2p.str4d.fux' (head 0bfff6086d6f72df836909ae379a95ebbe4b6933)
BIN apps/i2psnark/icons/basket_put.png (binary image, not shown; reported sizes: 379 B before, 733 B after)
@@ -122,6 +122,9 @@ public class I2PSnarkUtil {
 }
 ******/

+/** @since 0.9.1 */
+public I2PAppContext getContext() { return _context; }
+
 public boolean configured() { return _configured; }

 public void setI2CPConfig(String i2cpHost, int i2cpPort, Map opts) {
@@ -5,10 +5,10 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.Random;

 import net.i2p.I2PAppContext;
 import net.i2p.data.DataHelper;
+import net.i2p.util.RandomSource;

 import org.klomp.snark.bencode.BDecoder;
 import org.klomp.snark.bencode.BEValue;
@@ -27,7 +27,6 @@ import org.klomp.snark.bencode.BEValue;
 */
 class MagnetState {
 public static final int CHUNK_SIZE = 16*1024;
-private static final Random random = I2PAppContext.getGlobalContext().random();

 private final byte[] infohash;
 private boolean complete;
@@ -129,7 +128,7 @@ class MagnetState {
 throw new IllegalArgumentException("not initialized");
 if (complete)
 throw new IllegalArgumentException("complete");
-int rand = random.nextInt(totalChunks);
+int rand = RandomSource.getInstance().nextInt(totalChunks);
 for (int i = 0; i < totalChunks; i++) {
 int chk = (i + rand) % totalChunks;
 if (!(have.get(chk) || requested.get(chk))) {
@@ -25,6 +25,8 @@ import java.util.List;
 import java.util.Random;

 import net.i2p.I2PAppContext;
+import net.i2p.data.DataHelper;
+import net.i2p.util.Log;

 /**
 * TimerTask that checks for good/bad up/downloader. Works together
@@ -36,16 +38,18 @@ class PeerCheckerTask implements Runnable

 private final PeerCoordinator coordinator;
 private final I2PSnarkUtil _util;
+private final Log _log;
+private final Random random;
 private int _runCount;

 PeerCheckerTask(I2PSnarkUtil util, PeerCoordinator coordinator)
 {
 _util = util;
+_log = util.getContext().logManager().getLog(PeerCheckerTask.class);
+random = util.getContext().random();
 this.coordinator = coordinator;
 }

-private static final Random random = I2PAppContext.getGlobalContext().random();
-
 public void run()
 {
 _runCount++;
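The constructor change above is the pattern repeated throughout this propagate: static `I2PAppContext.getGlobalContext()` singletons become per-instance fields obtained from `util.getContext()`. A minimal sketch of the same idea using plain JDK stand-ins (the class names below are illustrative, not from the patch):

    import java.util.Random;

    // Illustrative stand-ins for the I2P classes: each worker takes its Random from
    // the context object it is handed, instead of a process-wide static instance.
    class ExampleContext {
        private final Random random = new Random();
        Random random() { return random; }
    }

    class ExampleChecker {
        private final Random random;   // was: a static field initialized from a global context

        ExampleChecker(ExampleContext ctx) {
            this.random = ctx.random(); // mirrors random = util.getContext().random() in the hunk above
        }

        int pickStart(int totalChunks) {
            return random.nextInt(totalChunks);
        }
    }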
@@ -82,6 +86,14 @@ class PeerCheckerTask implements Runnable
 continue;
 }

+if (peer.getInactiveTime() > PeerCoordinator.MAX_INACTIVE) {
+if (_log.shouldLog(Log.WARN))
+_log.warn("Disconnecting peer idle " +
+DataHelper.formatDuration(peer.getInactiveTime()) + ": " + peer);
+peer.disconnect();
+continue;
+}
+
 if (!peer.isChoking())
 uploaders++;

@@ -92,14 +104,15 @@ class PeerCheckerTask implements Runnable
 peer.setRateHistory(upload, download);
 peer.resetCounters();

-_util.debug(peer + ":", Snark.DEBUG);
-_util.debug(" ul: " + upload*1024/KILOPERSECOND
+if (_log.shouldLog(Log.DEBUG)) {
+_log.debug(peer + ":"
++ " ul: " + upload*1024/KILOPERSECOND
 + " dl: " + download*1024/KILOPERSECOND
 + " i: " + peer.isInterested()
 + " I: " + peer.isInteresting()
 + " c: " + peer.isChoking()
-+ " C: " + peer.isChoked(),
-Snark.DEBUG);
++ " C: " + peer.isChoked());
+}

 // Choke a percentage of them rather than all so it isn't so drastic...
 // unless this torrent is over the limit all by itself.
@@ -120,8 +133,8 @@ class PeerCheckerTask implements Runnable
 // Check if it still wants pieces from us.
 if (!peer.isInterested())
 {
-_util.debug("Choke uninterested peer: " + peer,
-Snark.INFO);
+if (_log.shouldLog(Log.INFO))
+_log.debug("Choke uninterested peer: " + peer);
 peer.setChoking(true);
 uploaders--;
 coordinator.uploaders--;
@@ -131,8 +144,8 @@ class PeerCheckerTask implements Runnable
 }
 else if (overBWLimitChoke)
 {
-_util.debug("BW limit (" + upload + "/" + uploaded + "), choke peer: " + peer,
-Snark.INFO);
+if (_log.shouldLog(Log.INFO))
+_log.debug("BW limit (" + upload + "/" + uploaded + "), choke peer: " + peer);
 peer.setChoking(true);
 uploaders--;
 coordinator.uploaders--;
@@ -144,7 +157,8 @@ class PeerCheckerTask implements Runnable
 else if (peer.isInteresting() && peer.isChoked())
 {
 // If they are choking us make someone else a downloader
-_util.debug("Choke choking peer: " + peer, Snark.DEBUG);
+if (_log.shouldLog(Log.DEBUG))
+_log.debug("Choke choking peer: " + peer);
 peer.setChoking(true);
 uploaders--;
 coordinator.uploaders--;
@@ -156,7 +170,8 @@ class PeerCheckerTask implements Runnable
 else if (!peer.isInteresting() && !coordinator.completed())
 {
 // If they aren't interesting make someone else a downloader
-_util.debug("Choke uninteresting peer: " + peer, Snark.DEBUG);
+if (_log.shouldLog(Log.DEBUG))
+_log.debug("Choke uninteresting peer: " + peer);
 peer.setChoking(true);
 uploaders--;
 coordinator.uploaders--;
@@ -170,8 +185,8 @@ class PeerCheckerTask implements Runnable
 && download == 0)
 {
 // We are downloading but didn't receive anything...
-_util.debug("Choke downloader that doesn't deliver:"
-+ peer, Snark.DEBUG);
+if (_log.shouldLog(Log.DEBUG))
+_log.debug("Choke downloader that doesn't deliver: " + peer);
 peer.setChoking(true);
 uploaders--;
 coordinator.uploaders--;
@@ -198,7 +213,10 @@ class PeerCheckerTask implements Runnable
 // send PEX
 if ((_runCount % 17) == 0 && !peer.isCompleted())
 coordinator.sendPeers(peer);
-peer.keepAlive();
+// cheap failsafe for seeds connected to seeds, stop pinging and hopefully
+// the inactive checker (above) will eventually disconnect it
+if (coordinator.getNeededLength() > 0 || !peer.isCompleted())
+peer.keepAlive();
 // announce them to local tracker (TrackerClient does this too)
 if (_util.getDHT() != null && (_runCount % 5) == 0) {
 _util.getDHT().announce(coordinator.getInfoHash(), peer.getPeerID().getDestHash());
@@ -215,8 +233,8 @@ class PeerCheckerTask implements Runnable
 || uploaders > uploadLimit)
 && worstDownloader != null)
 {
-_util.debug("Choke worst downloader: " + worstDownloader,
-Snark.DEBUG);
+if (_log.shouldLog(Log.DEBUG))
+_log.debug("Choke worst downloader: " + worstDownloader);

 worstDownloader.setChoking(true);
 coordinator.uploaders--;
@@ -68,6 +68,7 @@ class PeerCoordinator implements PeerListener
 // package local for access by CheckDownLoadersTask
 final static long CHECK_PERIOD = 40*1000; // 40 seconds
 final static int MAX_UPLOADERS = 6;
+public static final long MAX_INACTIVE = 8*60*1000;

 /**
 * Approximation of the number of current uploaders.
@@ -125,12 +126,12 @@ class PeerCoordinator implements PeerListener
 /** partial pieces - lock by synching on wantedPieces - TODO store Requests, not PartialPieces */
 private final List<PartialPiece> partialPieces;

-private boolean halted = false;
+private volatile boolean halted;

 private final MagnetState magnetState;
 private final CoordinatorListener listener;
 private final I2PSnarkUtil _util;
-private static final Random _random = I2PAppContext.getGlobalContext().random();
+private final Random _random;

 /**
 * @param metainfo null if in magnet mode
@@ -140,6 +141,7 @@ class PeerCoordinator implements PeerListener
 CoordinatorListener listener, Snark torrent)
 {
 _util = util;
+_random = util.getContext().random();
 this.id = id;
 this.infohash = infohash;
 this.metainfo = metainfo;
@@ -377,8 +379,10 @@ class PeerCoordinator implements PeerListener
 }

 /**
-* Reduce max if huge pieces to keep from ooming when leeching
-* @return 512K: 16; 1M: 11; 2M: 6
+* Formerly used to
+* reduce max if huge pieces to keep from ooming when leeching
+* but now we don't
+* @return usually 16
 */
 private int getMaxConnections() {
 if (metainfo == null)
@@ -388,7 +392,7 @@ class PeerCoordinator implements PeerListener
 return 4;
 if (pieces <= 5)
 return 6;
-int size = metainfo.getPieceLength(0);
+//int size = metainfo.getPieceLength(0);
 int max = _util.getMaxConnections();
 // Now that we use temp files, no memory concern
 //if (size <= 512*1024 || completed())
@@ -429,6 +433,14 @@ class PeerCoordinator implements PeerListener
 }
 }

+/**
+* @since 0.9.1
+*/
+public void restart() {
+halted = false;
+timer.schedule((CHECK_PERIOD / 2) + _random.nextInt((int) CHECK_PERIOD));
+}
+
 public void connected(Peer peer)
 {
 if (halted)
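The `timer.schedule(...)` line in the new `restart()` adds jitter rather than checking immediately; a small standalone calculation of the resulting delay range, assuming only the `CHECK_PERIOD` value visible in this diff:

    // Illustrative arithmetic only: with CHECK_PERIOD = 40*1000 ms, the schedule()
    // call in restart() fires the first peer check at a uniformly random point
    // in [20s, 60s), so restarted torrents do not all check at the same instant.
    public class RestartDelayDemo {
        public static void main(String[] args) {
            long CHECK_PERIOD = 40 * 1000;
            long earliest = CHECK_PERIOD / 2;                        // 20,000 ms
            long latestExclusive = CHECK_PERIOD / 2 + CHECK_PERIOD;  // 60,000 ms
            System.out.println("first check in [" + earliest + ", " + latestExclusive + ") ms");
        }
    }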
@@ -441,7 +453,7 @@ class PeerCoordinator implements PeerListener
 synchronized(peers)
 {
 Peer old = peerIDInList(peer.getPeerID(), peers);
-if ( (old != null) && (old.getInactiveTime() > 8*60*1000) ) {
+if ( (old != null) && (old.getInactiveTime() > MAX_INACTIVE) ) {
 // idle for 8 minutes, kill the old con (32KB/8min = 68B/sec minimum for one block)
 if (_log.shouldLog(Log.WARN))
 _log.warn("Remomving old peer: " + peer + ": " + old + ", inactive for " + old.getInactiveTime());
@@ -535,7 +547,7 @@ class PeerCoordinator implements PeerListener
 need_more = (!peer.isConnected()) && peersize < getMaxConnections();
 // Check if we already have this peer before we build the connection
 Peer old = peerIDInList(peer.getPeerID(), peers);
-need_more = need_more && ((old == null) || (old.getInactiveTime() > 8*60*1000));
+need_more = need_more && ((old == null) || (old.getInactiveTime() > MAX_INACTIVE));
 }

 if (need_more)
@@ -966,11 +978,8 @@ class PeerCoordinator implements PeerListener

 // Announce to the world we have it!
 // Disconnect from other seeders when we get the last piece
-List<Peer> toDisconnect = new ArrayList();
-Iterator<Peer> it = peers.iterator();
-while (it.hasNext())
-{
-Peer p = it.next();
+List<Peer> toDisconnect = done ? new ArrayList() : null;
+for (Peer p : peers) {
 if (p.isConnected())
 {
 if (done && p.isCompleted())
@@ -978,15 +987,13 @@ class PeerCoordinator implements PeerListener
 else
 p.have(piece);
 }
 }
-it = toDisconnect.iterator();
-while (it.hasNext())
-{
-Peer p = it.next();
-p.disconnect(true);
-}

 if (done) {
+for (Peer p : toDisconnect) {
+p.disconnect(true);
+}
+
 // put msg on the console if partial, since Storage won't do it
 if (!completed())
 snark.storageCompleted(storage);
@@ -553,21 +553,14 @@ public class Snark
 }

 stopped = false;
-boolean coordinatorChanged = false;
 if (coordinator.halted()) {
-// ok, we have already started and stopped, but the coordinator seems a bit annoying to
-// restart safely, so lets build a new one to replace the old
+coordinator.restart();
 if (_peerCoordinatorSet != null)
-_peerCoordinatorSet.remove(coordinator);
-PeerCoordinator newCoord = new PeerCoordinator(_util, id, infoHash, meta, storage, this, this);
-if (_peerCoordinatorSet != null)
-_peerCoordinatorSet.add(newCoord);
-coordinator = newCoord;
-coordinatorChanged = true;
+_peerCoordinatorSet.add(coordinator);
 }
-if (!trackerclient.started() && !coordinatorChanged) {
+if (!trackerclient.started()) {
 trackerclient.start();
-} else if (trackerclient.halted() || coordinatorChanged) {
+} else if (trackerclient.halted()) {
 if (storage != null) {
 try {
 storage.reopen(rootDataDir);
@@ -243,11 +243,12 @@ public class FetchAndAdd extends Snark implements EepGet.StatusListener, Runnabl
 }

 /**
-* @return torrent file bytes remaining or -1
+* @return -1 when done so the web will list us as "complete" instead of "seeding"
 */
 @Override
 public long getRemainingLength() {
-return _remaining;
+long rv = _remaining;
+return rv > 0 ? rv : -1;
 }

 /**
@@ -1073,7 +1073,7 @@ public class I2PSnarkServlet extends DefaultServlet {
 else if (isValid)
 icon = toIcon(meta.getName());
 else if (snark instanceof FetchAndAdd)
-icon = "arrow_down";
+icon = "basket_put";
 else
 icon = "magnet";
 if (isValid) {
@@ -1104,7 +1104,7 @@ public class I2PSnarkServlet extends DefaultServlet {

 out.write("<td align=\"right\" class=\"snarkTorrentETA " + rowClass + "\">");
 if(isRunning && remainingSeconds > 0)
-out.write(DataHelper.formatDuration2(remainingSeconds*1000)); // (eta 6h)
+out.write(DataHelper.formatDuration2(Math.max(remainingSeconds, 10) * 1000)); // (eta 6h)
 out.write("</td>\n\t");
 out.write("<td align=\"right\" class=\"snarkTorrentDownloaded " + rowClass + "\">");
 if (remaining > 0)
@@ -511,7 +511,7 @@ public abstract class I2PTunnelClientBase extends I2PTunnelTask implements Runna
 if (sm == null)
 return;
 Properties props = tunnel.getClientOptions();
-sm.setDefaultOptions(sockMgr.buildOptions(props));
+sm.setDefaultOptions(sm.buildOptions(props));
 }

 /**
@@ -79,7 +79,8 @@ public class IrcInboundFilter implements Runnable {
 outmsg=outmsg+"\r\n"; // rfc1459 sec. 2.3
 output.write(outmsg.getBytes("ISO-8859-1"));
 // probably doesn't do much but can't hurt
-output.flush();
+if (!in.ready())
+output.flush();
 } else {
 if (_log.shouldLog(Log.WARN))
 _log.warn("inbound BLOCKED: "+inmsg);
@@ -79,7 +79,9 @@ public class IrcOutboundFilter implements Runnable {
 outmsg=outmsg+"\r\n"; // rfc1459 sec. 2.3
 output.write(outmsg.getBytes("ISO-8859-1"));
 // save 250 ms in streaming
-output.flush();
+// Check ready() so we don't split the initial handshake up into multiple streaming messages
+if (!in.ready())
+output.flush();
 } else {
 if (_log.shouldLog(Log.WARN))
 _log.warn("outbound BLOCKED: "+"\""+inmsg+"\"");
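Both IRC filter hunks replace an unconditional `output.flush()` with a flush that is skipped while more input is already buffered. A minimal sketch of that batching pattern, using plain `java.io` types as stand-ins for the filter's streams:

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.OutputStream;

    // Illustrative stand-in for the filter loop: write each line, but only flush
    // when the reader has nothing more buffered, so a burst of lines (e.g. the
    // initial handshake) travels in one streaming message instead of several.
    class FlushWhenIdle {
        static void pump(BufferedReader in, OutputStream out) throws IOException {
            String line;
            while ((line = in.readLine()) != null) {
                out.write((line + "\r\n").getBytes("ISO-8859-1"));
                if (!in.ready())          // same check the hunks above add before flush()
                    out.flush();
            }
        }
    }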
@@ -158,7 +158,7 @@ public class PluginUpdateChecker extends UpdateHandler {
 try {
 _get = new PartialEepGet(_context, proxyHost, proxyPort, _baos, _xpi2pURL, TrustedUpdate.HEADER_BYTES);
 _get.addStatusListener(PluginUpdateCheckerRunner.this);
-_get.fetch();
+_get.fetch(CONNECT_TIMEOUT);
 } catch (Throwable t) {
 _log.error("Error checking update for plugin", t);
 }
@@ -149,7 +149,7 @@ public class PluginUpdateHandler extends UpdateHandler {
 else
 _get = new EepGet(_context, 1, _updateFile, _xpi2pURL, false);
 _get.addStatusListener(PluginUpdateRunner.this);
-_get.fetch();
+_get.fetch(CONNECT_TIMEOUT, -1, shouldProxy ? INACTIVITY_TIMEOUT : NOPROXY_INACTIVITY_TIMEOUT);
 } catch (Throwable t) {
 _log.error("Error downloading plugin", t);
 }
@@ -75,7 +75,7 @@ public class UnsignedUpdateHandler extends UpdateHandler {
 // 40 retries!!
 _get = new EepGet(_context, proxyHost, proxyPort, 40, _updateFile, _zipURL, false);
 _get.addStatusListener(UnsignedUpdateRunner.this);
-_get.fetch();
+_get.fetch(CONNECT_TIMEOUT, -1, INACTIVITY_TIMEOUT);
 } catch (Throwable t) {
 _log.error("Error updating", t);
 }
@@ -45,6 +45,10 @@ public class UpdateHandler {
 static final String PROP_UPDATE_IN_PROGRESS = "net.i2p.router.web.UpdateHandler.updateInProgress";
 protected static final String PROP_LAST_UPDATE_TIME = "router.updateLastDownloaded";

+protected static final long CONNECT_TIMEOUT = 55*1000;
+protected static final long INACTIVITY_TIMEOUT = 5*60*1000;
+protected static final long NOPROXY_INACTIVITY_TIMEOUT = 60*1000;
+
 public UpdateHandler() {
 this(ContextHelper.getContext(null));
 }
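The three constants added above are what the `_get.fetch(...)` calls in the surrounding hunks now pass. A small standalone sketch of how they combine (the class name here is illustrative):

    // Illustrative only: how the new constants are combined by the fetch(...) calls
    // in this diff; the middle -1 argument in those calls means no overall timeout.
    public class UpdateTimeouts {
        static final long CONNECT_TIMEOUT = 55 * 1000;
        static final long INACTIVITY_TIMEOUT = 5 * 60 * 1000;
        static final long NOPROXY_INACTIVITY_TIMEOUT = 60 * 1000;

        static long inactivityTimeout(boolean shouldProxy) {
            return shouldProxy ? INACTIVITY_TIMEOUT : NOPROXY_INACTIVITY_TIMEOUT;
        }

        public static void main(String[] args) {
            System.out.println("proxied: " + inactivityTimeout(true) + " ms");
            System.out.println("direct:  " + inactivityTimeout(false) + " ms");
        }
    }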
@@ -193,7 +197,7 @@ public class UpdateHandler {
 // no retries
 _get = new PartialEepGet(_context, proxyHost, proxyPort, _baos, updateURL, TrustedUpdate.HEADER_BYTES);
 _get.addStatusListener(UpdateRunner.this);
-_get.fetch();
+_get.fetch(CONNECT_TIMEOUT);
 } catch (Throwable t) {
 _isNewer = false;
 }
@@ -210,7 +214,7 @@ public class UpdateHandler {
 else
 _get = new EepGet(_context, 1, _updateFile, updateURL, false);
 _get.addStatusListener(UpdateRunner.this);
-_get.fetch();
+_get.fetch(CONNECT_TIMEOUT, -1, shouldProxy ? INACTIVITY_TIMEOUT : NOPROXY_INACTIVITY_TIMEOUT);
 } catch (Throwable t) {
 _log.error("Error updating", t);
 }
@@ -34,19 +34,19 @@ class Connection {
 private boolean _resetReceived;
 private boolean _resetSent;
 private long _resetSentOn;
-private boolean _connected;
+private volatile boolean _connected;
 private boolean _hardDisconnected;
 private final MessageInputStream _inputStream;
 private final MessageOutputStream _outputStream;
 private final SchedulerChooser _chooser;
-private long _nextSendTime;
+private volatile long _nextSendTime;
 private long _ackedPackets;
 private final long _createdOn;
 private long _closeSentOn;
 private long _closeReceivedOn;
 private int _unackedPacketsReceived;
 private long _congestionWindowEnd;
-private long _highestAckedThrough;
+private volatile long _highestAckedThrough;
 private boolean _isInbound;
 private boolean _updatedShareOpts;
 /** Packet ID (Long) to PacketLocal for sent but unacked packets */
@@ -60,11 +60,11 @@ class Connection {
 private String _connectionError;
 private long _disconnectScheduledOn;
 private long _lastReceivedOn;
-private ActivityTimer _activityTimer;
+private final ActivityTimer _activityTimer;
 /** window size when we last saw congestion */
 private int _lastCongestionSeenAt;
 private long _lastCongestionTime;
-private long _lastCongestionHighestUnacked;
+private volatile long _lastCongestionHighestUnacked;
 private boolean _ackSinceCongestion;
 /** Notify this on connection (or connection failure) */
 private final Object _connectLock;
@@ -96,7 +96,9 @@ class Connection {
 }
 ****/

-/** */
+/**
+* @param opts may be null
+*/
 public Connection(I2PAppContext ctx, ConnectionManager manager, SchedulerChooser chooser,
 SimpleTimer2 timer,
 PacketQueue queue, ConnectionPacketHandler handler, ConnectionOptions opts) {
@@ -138,10 +140,7 @@ class Connection {
 _resetSentOn = -1;
 _connectionEvent = new ConEvent();
 _randomWait = _context.random().nextInt(10*1000); // just do this once to reduce usage
-_context.statManager().createRateStat("stream.con.windowSizeAtCongestion", "How large was our send window when we send a dup?", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
-_context.statManager().createRateStat("stream.chokeSizeBegin", "How many messages were outstanding when we started to choke?", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
-_context.statManager().createRateStat("stream.chokeSizeEnd", "How many messages were outstanding when we stopped being choked?", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
-_context.statManager().createRateStat("stream.fastRetransmit", "How long a packet has been around for if it has been resent per the fast retransmit timer?", "Stream", new long[] { 60*1000, 10*60*1000 });
+// all createRateStats in ConnectionManager
 if (_log.shouldLog(Log.INFO))
 _log.info("New connection created with options: " + _options);
 }
@@ -169,7 +168,6 @@ class Connection {
 * will return false after 5 minutes even if timeoutMs is <= 0.
 */
 boolean packetSendChoke(long timeoutMs) {
-// if (false) return true; // <--- what the fuck??
 long start = _context.clock().now();
 long writeExpire = start + timeoutMs; // only used if timeoutMs > 0
 boolean started = false;
@@ -187,19 +185,26 @@ class Connection {
 if (!_connected)
 return false;
 started = true;
-if ( (_outboundPackets.size() >= _options.getWindowSize()) || (_activeResends > 0) ||
-(_lastSendId.get() - _highestAckedThrough > _options.getWindowSize()) ) {
+// Try to keep things moving even during NACKs and retransmissions...
+// Limit unacked packets to the window
+// Limit active resends to half the window
+// Limit (highest-lowest) to twice the window (if far end doesn't like it, it can send a choke)
+int unacked = _outboundPackets.size();
+int wsz = _options.getWindowSize();
+if (unacked >= wsz ||
+_activeResends >= (wsz + 1) / 2 ||
+_lastSendId.get() - _highestAckedThrough >= Math.max(MAX_WINDOW_SIZE, 2 * wsz)) {
 if (timeoutMs > 0) {
 if (timeLeft <= 0) {
 if (_log.shouldLog(Log.INFO))
-_log.info("Outbound window is full of " + _outboundPackets.size()
-+ " with " + _activeResends + " active resends"
+_log.info("Outbound window is full " + unacked
++ " unacked with " + _activeResends + " active resends"
 + " and we've waited too long (" + (0-(timeLeft - timeoutMs)) + "ms): "
 + toString());
 return false;
 }
 if (_log.shouldLog(Log.DEBUG))
-_log.debug("Outbound window is full (" + _outboundPackets.size() + "/" + _options.getWindowSize() + "/"
+_log.debug("Outbound window is full (" + unacked + "/" + wsz + "/"
 + _activeResends + "), waiting " + timeLeft);
 try { _outboundPackets.wait(Math.min(timeLeft,250l)); } catch (InterruptedException ie) { if (_log.shouldLog(Log.DEBUG)) _log.debug("InterruptedException while Outbound window is full (" + _outboundPackets.size() + "/" + _activeResends +")"); return false;}
 } else {
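The rewritten condition above replaces the old two-part test with three explicit limits. A minimal sketch of that gate as a standalone predicate (the `MAX_WINDOW_SIZE` value below is a placeholder, not the library's constant):

    // Illustrative stand-in for the new gate in packetSendChoke(): block the writer
    // when any one of the three limits is hit.
    class SendGate {
        static final int MAX_WINDOW_SIZE = 128; // placeholder value for illustration

        static boolean shouldBlock(int unacked, int activeResends,
                                   long lastSendId, long highestAckedThrough, int wsz) {
            return unacked >= wsz                                     // unacked packets fill the window
                || activeResends >= (wsz + 1) / 2                     // at most half the window resending
                || lastSendId - highestAckedThrough >= Math.max(MAX_WINDOW_SIZE, 2 * wsz); // sequence span cap
        }
    }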
@@ -223,6 +228,12 @@ class Connection {

 void ackImmediately() {
 PacketLocal packet = null;
+/*** why would we do this?
+was it to force a congestion indication at the other end?
+an expensive way to do that...
+One big user was via SchedulerClosing to resend a CLOSE packet,
+but why do that either...
+
 synchronized (_outboundPackets) {
 if (!_outboundPackets.isEmpty()) {
 // ordered, so pick the lowest to retransmit
@@ -239,6 +250,7 @@ class Connection {
 }
 ResendPacketEvent evt = (ResendPacketEvent)packet.getResendEvent();
 if (evt != null) {
+// fixme should we set a flag and reschedule instead? or synch?
 boolean sent = evt.retransmit(false);
 if (sent) {
 if (_log.shouldLog(Log.DEBUG))
@@ -251,7 +263,9 @@ class Connection {
 }
 }
 }
+***/
 // if we don't have anything to retransmit, send a small ack
+// this calls sendPacket() below
 packet = _receiver.send(null, 0, 0);
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("sending new ack: " + packet);
@@ -281,11 +295,15 @@ class Connection {
 reply.setReceiveStreamId(_receiveStreamId);
 reply.setOptionalFrom(_connectionManager.getSession().getMyDestination());
 // this just sends the packet - no retries or whatnot
-_outboundQueue.enqueue(reply);
+if (_outboundQueue.enqueue(reply)) {
+_unackedPacketsReceived = 0;
+_lastSendTime = _context.clock().now();
+resetActivityTimer();
+}
 }

 /**
-* Flush any data that we can
+* Flush any data that we can. Non-blocking.
 */
 void sendAvailable() {
 // this grabs the data, builds a packet, and queues it up via sendPacket
@@ -301,7 +319,6 @@ class Connection {
 if (packet == null) return;

 setNextSendTime(-1);
-_unackedPacketsReceived = 0;
 if (_options.getRequireFullySigned()) {
 packet.setFlag(Packet.FLAG_SIGNATURE_INCLUDED);
 packet.setFlag(Packet.FLAG_SIGNATURE_REQUESTED);
@@ -328,8 +345,8 @@ class Connection {
 (packet.getSequenceNum() % 8 == 0)) {
 packet.setOptionalDelay(0);
 packet.setFlag(Packet.FLAG_DELAY_REQUESTED);
-if (_log.shouldLog(Log.DEBUG))
-_log.debug("Requesting no ack delay for packet " + packet);
+//if (_log.shouldLog(Log.DEBUG))
+// _log.debug("Requesting no ack delay for packet " + packet);
 } else {
 // This is somewhat of a waste of time, unless the RTT < 4000,
 // since the other end limits it to getSendAckDelay()
@@ -358,10 +375,12 @@ class Connection {
 // warning, getStatLog() can be null
 //_context.statManager().getStatLog().addData(Packet.toId(_sendStreamId), "stream.rtt", _options.getRTT(), _options.getWindowSize());

-_lastSendTime = _context.clock().now();
-_outboundQueue.enqueue(packet);
-resetActivityTimer();
+if (_outboundQueue.enqueue(packet)) {
+_unackedPacketsReceived = 0;
+_lastSendTime = _context.clock().now();
+resetActivityTimer();
+}

 /*
 if (ackOnly) {
 // ACK only, don't schedule this packet for retries
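As with the earlier `_outboundQueue.enqueue(reply)` change, the send-side state is now updated only when `enqueue()` reports that the packet was actually queued. A minimal sketch of that guard with generic stand-in types:

    // Illustrative stand-in: counters and timers are only touched when the queue
    // actually accepted the packet, which is why enqueue() now reports success.
    interface PacketSink<T> {
        boolean enqueue(T packet);
    }

    class Sender<T> {
        private long lastSendTime;
        private int unackedPacketsReceived;

        void send(PacketSink<T> queue, T packet) {
            if (queue.enqueue(packet)) {   // mirrors if (_outboundQueue.enqueue(packet)) { ... }
                unackedPacketsReceived = 0;
                lastSendTime = System.currentTimeMillis();
                // the real code also calls resetActivityTimer() here
            }
        }
    }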
@@ -397,6 +416,7 @@ class Connection {
 * @return List of packets acked or null
 */
 List<PacketLocal> ackPackets(long ackThrough, long nacks[]) {
+// FIXME synch this part too?
 if (ackThrough < _highestAckedThrough) {
 // dupack which won't tell us anything
 } else {
@@ -415,16 +435,17 @@ class Connection {

 List<PacketLocal> acked = null;
 synchronized (_outboundPackets) {
-for (Iterator<Long> iter = _outboundPackets.keySet().iterator(); iter.hasNext(); ) {
-Long id = iter.next();
-if (id.longValue() <= ackThrough) {
+for (Map.Entry<Long, PacketLocal> e : _outboundPackets.entrySet()) {
+long id = e.getKey().longValue();
+if (id <= ackThrough) {
 boolean nacked = false;
 if (nacks != null) {
 // linear search since its probably really tiny
 for (int i = 0; i < nacks.length; i++) {
-if (nacks[i] == id.longValue()) {
+if (nacks[i] == id) {
 nacked = true;
-PacketLocal nackedPacket = _outboundPackets.get(id);
+PacketLocal nackedPacket = e.getValue();
+// this will do a fast retransmit if appropriate
 nackedPacket.incrementNACKs();
 break; // NACKed
 }
@@ -433,11 +454,27 @@ class Connection {
 if (!nacked) { // aka ACKed
 if (acked == null)
 acked = new ArrayList(1);
-PacketLocal ackedPacket = _outboundPackets.get(id);
+PacketLocal ackedPacket = e.getValue();
 ackedPacket.ackReceived();
 acked.add(ackedPacket);
 }
 } else {
+// TODO
+// we do not currently do an "implicit nack" of the packets higher
+// than ackThrough, so those will not be fast retransmitted
+// we could incrementNACK them here... but we may need to set the fastRettransmit
+// threshold back to 3 for that.
+// this will do a fast retransmit if appropriate
+// This doesn't work because every packet has an ACK in it, so we hit the
+// FAST_TRANSMIT threshold in a heartbeat and retransmit everything,
+// even with the threshold at 3. (we never set the NO_ACK field in the header)
+// Also, we may need to track that we
+// have the same ackThrough for 3 or 4 consecutive times.
+// See https://secure.wikimedia.org/wikipedia/en/wiki/Fast_retransmit
+//if (_log.shouldLog(Log.INFO))
+// _log.info("ACK thru " + ackThrough + " implicitly NACKs " + id);
+//PacketLocal nackedPacket = e.getValue();
+//nackedPacket.incrementNACKs();
 break; // _outboundPackets is ordered
 }
 }
@@ -465,31 +502,33 @@ class Connection {
 return acked;
 }

-private long _occurredTime;
-private long _occurredEventCount;
+//private long _occurredTime;
+//private long _occurredEventCount;

 void eventOccurred() {
-long now = System.currentTimeMillis();
+//long now = System.currentTimeMillis();

 TaskScheduler sched = _chooser.getScheduler(this);

-now = now - now % 1000;
-if (_occurredTime == now) {
-_occurredEventCount++;
-} else {
-_occurredTime = now;
-if ( (_occurredEventCount > 1000) && (_log.shouldLog(Log.WARN)) ) {
-_log.warn("More than 1000 events (" + _occurredEventCount + ") in a second on "
-+ toString() + ": scheduler = " + sched);
-}
-_occurredEventCount = 0;
-}
+//now = now - now % 1000;
+//if (_occurredTime == now) {
+// _occurredEventCount++;
+//} else {
+// _occurredTime = now;
+// if ( (_occurredEventCount > 1000) && (_log.shouldLog(Log.WARN)) ) {
+// _log.warn("More than 1000 events (" + _occurredEventCount + ") in a second on "
+// + toString() + ": scheduler = " + sched);
+// }
+// _occurredEventCount = 0;
+//}

 long before = System.currentTimeMillis();

 sched.eventOccurred(this);
 long elapsed = System.currentTimeMillis() - before;
-if ( (elapsed > 1000) && (_log.shouldLog(Log.WARN)) )
-_log.warn("Took " + elapsed + "ms to pump through " + sched);
+// 250 and warn for debugging
+if ( (elapsed > 250) && (_log.shouldLog(Log.WARN)) )
+_log.warn("Took " + elapsed + "ms to pump through " + sched + " on " + toString());
 }

 void resetReceived() {
@@ -498,12 +537,8 @@ class Connection {
 SimpleScheduler.getInstance().addEvent(new DisconnectEvent(), DISCONNECT_TIMEOUT);
 }
 _resetReceived = true;
-MessageOutputStream mos = _outputStream;
-MessageInputStream mis = _inputStream;
-if (mos != null)
-mos.streamErrorOccurred(new IOException("Reset received"));
-if (mis != null)
-mis.streamErrorOccurred(new IOException("Reset received"));
+_outputStream.streamErrorOccurred(new IOException("Reset received"));
+_inputStream.streamErrorOccurred(new IOException("Reset received"));
 _connectionError = "Connection reset";
 synchronized (_connectLock) { _connectLock.notifyAll(); }
 }
@@ -556,15 +591,10 @@ class Connection {
 s.destroy2();
 _socket = null;
 }
-if (_outputStream != null)
-_outputStream.destroy();
-if (_receiver != null)
-_receiver.destroy();
-if (_activityTimer != null)
-_activityTimer.cancel();
-//_activityTimer = null;
-if (_inputStream != null)
-_inputStream.streamErrorOccurred(new IOException("disconnected!"));
+_outputStream.destroy();
+_receiver.destroy();
+_activityTimer.cancel();
+_inputStream.streamErrorOccurred(new IOException("disconnected!"));

 if (_disconnectScheduledOn < 0) {
 _disconnectScheduledOn = _context.clock().now();
@@ -656,11 +686,7 @@ class Connection {
 * @return Last time we sent data
 */
 public long getLastSendTime() { return _lastSendTime; }
-/** Set the time we sent data.
-* @param when The time we sent data
-*/
-public void setLastSendTime(long when) { _lastSendTime = when; }

 /** What was the last packet Id sent to the peer?
 * @return The last sent packet ID
 */
@@ -795,10 +821,9 @@ class Connection {

 public long getCongestionWindowEnd() { return _congestionWindowEnd; }
 public void setCongestionWindowEnd(long endMsg) { _congestionWindowEnd = endMsg; }

 /** @return the highest outbound packet we have recieved an ack for */
 public long getHighestAckedThrough() { return _highestAckedThrough; }
-/** @deprecated unused */
-public void setHighestAckedThrough(long msgNum) { _highestAckedThrough = msgNum; }

 public long getLastActivityOn() {
 return (_lastSendTime > _lastReceivedOn ? _lastSendTime : _lastReceivedOn);
@@ -878,17 +903,12 @@ class Connection {
 }

 private void resetActivityTimer() {
-if (_options.getInactivityTimeout() <= 0) {
-if (_log.shouldLog(Log.DEBUG))
-_log.debug("Resetting the inactivity timer, but its gone!", new Exception("where did it go?"));
-return;
-}
-if (_activityTimer == null) {
-if (_log.shouldLog(Log.DEBUG))
-_log.debug("Resetting the inactivity timer, but its gone!", new Exception("where did it go?"));
-return;
-}
 long howLong = _options.getInactivityTimeout();
+if (howLong <= 0) {
+if (_log.shouldLog(Log.DEBUG))
+_log.debug("Resetting the inactivity timer, but its gone!", new Exception("where did it go?"));
+return;
+}
 howLong += _randomWait; // randomize it a bit, so both sides don't do it at once
 //if (_log.shouldLog(Log.DEBUG))
 // _log.debug("Resetting the inactivity timer to " + howLong);
@@ -983,12 +1003,12 @@ class Connection {
 }

 /** stream that the local peer receives data on
-* @return the inbound message stream
+* @return the inbound message stream, non-null
 */
 public MessageInputStream getInputStream() { return _inputStream; }

 /** stream that the local peer sends data to the remote peer on
-* @return the outbound message stream
+* @return the outbound message stream, non-null
 */
 public MessageOutputStream getOutputStream() { return _outputStream; }

@@ -1032,12 +1052,10 @@ class Connection {
 */
 buf.append("unacked in: ").append(getUnackedPacketsReceived());
 int missing = 0;
-if (_inputStream != null) {
-long nacks[] = _inputStream.getNacks();
-if (nacks != null) {
-missing = nacks.length;
-buf.append(" [").append(missing).append(" missing]");
-}
+long nacks[] = _inputStream.getNacks();
+if (nacks != null) {
+missing = nacks.length;
+buf.append(" [").append(missing).append(" missing]");
 }

 if (getResetSent())
@@ -1053,8 +1071,7 @@ class Connection {
 if (getCloseReceivedOn() > 0)
 buf.append(" close received ").append(DataHelper.formatDuration(_context.clock().now() - getCloseReceivedOn())).append(" ago");
 buf.append(" sent: ").append(1 + _lastSendId.get());
-if (_inputStream != null)
-buf.append(" rcvd: ").append(1 + _inputStream.getHighestBlockId() - missing);
+buf.append(" rcvd: ").append(1 + _inputStream.getHighestBlockId() - missing);

 buf.append(" maxWin ").append(getOptions().getMaxWindowSize());
 buf.append(" MTU ").append(getOptions().getMaxMessageSize());
@@ -1086,14 +1103,15 @@ class Connection {
 * there are other packets in flight. 3 takes forever, let's try 2.
 *
 */
-static final int FAST_RETRANSMIT_THRESHOLD = 2;
+static final int FAST_RETRANSMIT_THRESHOLD = 3;

 /**
 * Coordinate the resends of a given packet
 */
 class ResendPacketEvent extends SimpleTimer2.TimedEvent {
-private PacketLocal _packet;
+private final PacketLocal _packet;
 private long _nextSendTime;

 public ResendPacketEvent(PacketLocal packet, long delay) {
 super(_timer);
 _packet = packet;
@@ -1111,6 +1129,8 @@ class Connection {
 * we have to use forceReschedule() instead of schedule() below,
 * to prevent duplicates in the timer queue.
 *
+* don't synchronize this, deadlock with ackPackets->ackReceived->SimpleTimer2.cancel
+*
 * @param penalize true if this retransmission is caused by a timeout, false if we
 * are just sending this packet instead of an ACK
 * @return true if the packet was sent, false if it was not
@@ -1131,7 +1151,12 @@ class Connection {
 boolean resend = false;
 boolean isLowest = false;
 synchronized (_outboundPackets) {
-if (_packet.getSequenceNum() == _highestAckedThrough + 1)
+// allow appx. half the window to be "lowest" and be active resends, minimum of 3
+// Note: we should really pick the N lowest, not the lowest one + N more who
+// happen to get here next, as the timers get out-of-order esp. after fast retx
+if (_packet.getSequenceNum() == _highestAckedThrough + 1 ||
+_packet.getNumSends() > 1 ||
+_activeResends < Math.max(3, (_options.getWindowSize() + 1) / 2))
 isLowest = true;
 if (_outboundPackets.containsKey(Long.valueOf(_packet.getSequenceNum())))
 resend = true;
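The new `isLowest` test above caps how many packets may count as active resends at once. A small standalone illustration of the `Math.max(3, (windowSize + 1) / 2)` cap:

    // Illustrative arithmetic only: roughly half the window, but never fewer
    // than 3 packets, may be treated as active resends.
    public class ResendCapDemo {
        static int cap(int windowSize) {
            return Math.max(3, (windowSize + 1) / 2);
        }

        public static void main(String[] args) {
            for (int w : new int[] { 1, 6, 12, 64 })
                System.out.println("window " + w + " -> active resend cap " + cap(w));
        }
    }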
@@ -1145,24 +1170,28 @@ class Connection {
 // BUG? seq# = 0, activeResends = 0, loop forever - why?
 // also seen with seq# > 0. Is the _activeResends count reliable?
 if (_log.shouldLog(Log.INFO))
-_log.info("Delaying resend of " + _packet + " as there are "
-+ _activeResends + " active resends already in play");
-forceReschedule(1000);
-_nextSendTime = 1000 + _context.clock().now();
+_log.info("Delaying resend of " + _packet + " with "
++ _activeResends + " active resend, "
++ _outboundPackets.size() + " unacked, window size = " + _options.getWindowSize());
+forceReschedule(1333);
+_nextSendTime = 1333 + _context.clock().now();
 return false;
 }

+// It's the lowest, or it's fast retransmit time. Resend the packet.

 if (fastRetransmit)
 _context.statManager().addRateData("stream.fastRetransmit", _packet.getLifetime(), _packet.getLifetime());

 // revamp various fields, in case we need to ack more, etc
-_inputStream.updateAcks(_packet);
+// updateAcks done in enqueue()
+//_inputStream.updateAcks(_packet);
 int choke = getOptions().getChoke();
 _packet.setOptionalDelay(choke);
 if (choke > 0)
 _packet.setFlag(Packet.FLAG_DELAY_REQUESTED);
 // this seems unnecessary to send the MSS again:
-_packet.setOptionalMaxSize(getOptions().getMaxMessageSize());
+//_packet.setOptionalMaxSize(getOptions().getMaxMessageSize());
 // bugfix release 0.7.8, we weren't dividing by 1000
 _packet.setResendDelay(getOptions().getResendDelay() / 1000);
 if (_packet.getReceiveStreamId() <= 0)
@ -1186,7 +1215,7 @@ class Connection {
|
|||||||
getOptions().setWindowSize(newWindowSize);
|
getOptions().setWindowSize(newWindowSize);
|
||||||
|
|
||||||
if (_log.shouldLog(Log.WARN))
|
if (_log.shouldLog(Log.WARN))
|
||||||
_log.warn("Congestion resending packet " + _packet.getSequenceNum() + ": new windowSize " + newWindowSize
|
_log.warn("Congestion, resending packet " + _packet.getSequenceNum() + " (new windowSize " + newWindowSize
|
||||||
+ "/" + getOptions().getWindowSize() + ") for " + Connection.this.toString());
|
+ "/" + getOptions().getWindowSize() + ") for " + Connection.this.toString());
|
||||||
|
|
||||||
windowAdjusted();
|
windowAdjusted();
|
||||||
@ -1195,10 +1224,6 @@ class Connection {
|
|||||||
|
|
||||||
int numSends = _packet.getNumSends() + 1;
|
int numSends = _packet.getNumSends() + 1;
|
||||||
|
|
||||||
if (numSends == 2) {
|
|
||||||
// first resend for this packet
|
|
||||||
_activeResends++;
|
|
||||||
}
|
|
||||||
|
|
||||||
// in case things really suck, the other side may have lost thier
|
// in case things really suck, the other side may have lost thier
|
||||||
// session tags (e.g. they restarted), so jump back to ElGamal.
|
// session tags (e.g. they restarted), so jump back to ElGamal.
|
||||||
@ -1225,27 +1250,34 @@ class Connection {
|
|||||||
// set this before enqueue() as it passes it on to the router
|
// set this before enqueue() as it passes it on to the router
|
||||||
_nextSendTime = timeout + _context.clock().now();
|
_nextSendTime = timeout + _context.clock().now();
|
||||||
|
|
||||||
if (_log.shouldLog(Log.INFO))
|
if (_outboundQueue.enqueue(_packet)) {
|
||||||
_log.info("Resend packet " + _packet + " time " + numSends +
|
// first resend for this packet ?
|
||||||
|
if (numSends == 2)
|
||||||
|
_activeResends++;
|
||||||
|
if (_log.shouldLog(Log.INFO))
|
||||||
|
_log.info("Resent packet " +
|
||||||
|
(fastRetransmit ? "(fast) " : "(timeout) ") +
|
||||||
|
_packet +
|
||||||
|
" next resend in " + timeout + "ms" +
|
||||||
" activeResends: " + _activeResends +
|
" activeResends: " + _activeResends +
|
||||||
" (wsize "
|
" (wsize "
|
||||||
+ newWindowSize + " lifetime "
|
+ newWindowSize + " lifetime "
|
||||||
+ (_context.clock().now() - _packet.getCreatedOn()) + "ms)");
|
+ (_context.clock().now() - _packet.getCreatedOn()) + "ms)");
|
||||||
_outboundQueue.enqueue(_packet);
|
_unackedPacketsReceived = 0;
|
||||||
_lastSendTime = _context.clock().now();
|
_lastSendTime = _context.clock().now();
|
||||||
|
// timer reset added 0.9.1
|
||||||
|
resetActivityTimer();
|
||||||
|
}
|
||||||
|
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
|
||||||
_log.debug("Scheduling resend in " + timeout + "ms for " + _packet);
|
|
||||||
forceReschedule(timeout);
|
forceReschedule(timeout);
|
||||||
}
|
}
|
||||||
|
|
||||||
// acked during resending (... or somethin')
|
// acked during resending (... or somethin') ????????????
|
||||||
if ( (_packet.getAckTime() > 0) && (_packet.getNumSends() > 1) ) {
|
if ( (_packet.getAckTime() > 0) && (_packet.getNumSends() > 1) ) {
|
||||||
_activeResends--;
|
_activeResends--;
|
||||||
synchronized (_outboundPackets) {
|
synchronized (_outboundPackets) {
|
||||||
_outboundPackets.notifyAll();
|
_outboundPackets.notifyAll();
|
||||||
}
|
}
|
||||||
return true;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return true;
|
return true;
|
||||||
|
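The Connection hunk at -1131 above widens the old "is it the next unacked packet?" test: a packet now also qualifies for an active resend if it has already been sent more than once, or if fewer than max(3, half the window) active resends are outstanding. A minimal illustrative sketch of that condition as a standalone function follows; it is not part of the patch and the class and method names are hypothetical.

    // Illustrative sketch only - not part of the patch; names are hypothetical.
    final class ResendEligibility {
        /**
         * Mirrors the condition added at -1131: resend now if this is the next
         * unacked packet, it was already resent once, or fewer than
         * max(3, (window + 1) / 2) active resends are in flight.
         */
        static boolean isLowest(long seqNum, long highestAckedThrough,
                                int numSends, int activeResends, int windowSize) {
            return seqNum == highestAckedThrough + 1
                   || numSends > 1
                   || activeResends < Math.max(3, (windowSize + 1) / 2);
        }

        public static void main(String[] args) {
            // a window of 8 allows up to 4 concurrent active resends
            System.out.println(isLowest(20, 12, 1, 3, 8)); // true: a resend slot is free
            System.out.println(isLowest(20, 12, 1, 4, 8)); // false: not next, not resent, slots full
        }
    }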
@@ -10,7 +10,12 @@ import net.i2p.util.Log;
 * do NOT block, but they also do not necessary imply immediate
 * delivery, or even the generation of a new packet. This class
 * is the only one that builds useful outbound Packet objects.
-*
+*<p>
+* MessageOutputStream -> ConnectionDataReceiver -> Connection -> PacketQueue -> I2PSession
+*<p>
+* There's one of these per MessageOutputStream.
+* It stores no state. It sends everything to the Connection unless
+* the Connection is closed,
 */
 class ConnectionDataReceiver implements MessageOutputStream.DataReceiver {
 private final I2PAppContext _context;
@@ -82,7 +87,7 @@ class ConnectionDataReceiver implements MessageOutputStream.DataReceiver {
 if (_log.shouldLog(Log.INFO) && !doSend)
 _log.info("writeData called: size="+size + " doSend=" + doSend
 + " unackedReceived: " + con.getUnackedPacketsReceived()
-+ " con: " + con, new Exception("write called by"));
++ " con: " + con /* , new Exception("write called by") */ );

 if (doSend) {
 PacketLocal packet = send(buf, off, size);
@@ -111,6 +116,7 @@ class ConnectionDataReceiver implements MessageOutputStream.DataReceiver {
 public PacketLocal send(byte buf[], int off, int size) {
 return send(buf, off, size, false);
 }

 /**
 * @param buf data to be sent - may be null
 * @param off offset into the buffer to start writing from
@@ -120,22 +126,20 @@ class ConnectionDataReceiver implements MessageOutputStream.DataReceiver {
 * @return the packet sent
 */
 public PacketLocal send(byte buf[], int off, int size, boolean forceIncrement) {
-Connection con = _connection;
-//if (con == null) return null;
-long before = System.currentTimeMillis();
-PacketLocal packet = buildPacket(con, buf, off, size, forceIncrement);
-long built = System.currentTimeMillis();
-con.sendPacket(packet);
-long sent = System.currentTimeMillis();
+//long before = System.currentTimeMillis();
+PacketLocal packet = buildPacket(buf, off, size, forceIncrement);
+//long built = System.currentTimeMillis();
+_connection.sendPacket(packet);
+//long sent = System.currentTimeMillis();

-if ( (built-before > 5*1000) && (_log.shouldLog(Log.WARN)) )
-_log.warn("wtf, took " + (built-before) + "ms to build a packet: " + packet);
-if ( (sent-built> 5*1000) && (_log.shouldLog(Log.WARN)) )
-_log.warn("wtf, took " + (sent-built) + "ms to send a packet: " + packet);
+//if ( (built-before > 5*1000) && (_log.shouldLog(Log.WARN)) )
+// _log.warn("wtf, took " + (built-before) + "ms to build a packet: " + packet);
+//if ( (sent-built> 5*1000) && (_log.shouldLog(Log.WARN)) )
+// _log.warn("wtf, took " + (sent-built) + "ms to send a packet: " + packet);
 return packet;
 }

-private boolean isAckOnly(Connection con, int size) {
+private static boolean isAckOnly(Connection con, int size) {
 boolean ackOnly = ( (size <= 0) && // no data
 (con.getLastSendId() >= 0) && // not a SYN
 ( (!con.getOutputStream().getClosed()) || // not a CLOSE
@@ -144,7 +148,16 @@ class ConnectionDataReceiver implements MessageOutputStream.DataReceiver {
 return ackOnly;
 }

-private PacketLocal buildPacket(Connection con, byte buf[], int off, int size, boolean forceIncrement) {
+/**
+* @param buf data to be sent - may be null
+* @param off offset into the buffer to start writing from
+* @param size how many bytes of the buffer to write (may be 0)
+* @param forceIncrement even if the buffer is empty, increment the packetId
+* so we get an ACK back
+* @return the packet to be sent
+*/
+private PacketLocal buildPacket(byte buf[], int off, int size, boolean forceIncrement) {
+Connection con = _connection;
 if (size > Packet.MAX_PAYLOAD_SIZE) throw new IllegalArgumentException("size is too large (" + size + ")");
 boolean ackOnly = isAckOnly(con, size);
 boolean isFirst = (con.getAckedPackets() <= 0) && (con.getUnackedPacketsSent() <= 0);
@@ -164,7 +177,8 @@ class ConnectionDataReceiver implements MessageOutputStream.DataReceiver {
 packet.setSendStreamId(con.getSendStreamId());
 packet.setReceiveStreamId(con.getReceiveStreamId());

-con.getInputStream().updateAcks(packet);
+// not needed here, handled in PacketQueue.enqueue()
+//con.getInputStream().updateAcks(packet);
 // note that the optional delay is usually rewritten in Connection.sendPacket()
 int choke = con.getOptions().getChoke();
 packet.setOptionalDelay(choke);
@@ -195,6 +209,7 @@ class ConnectionDataReceiver implements MessageOutputStream.DataReceiver {
 // don't set the closed flag if this is a plain ACK and there are outstanding
 // packets sent, otherwise the other side could receive the CLOSE prematurely,
 // since this ACK could arrive before the unacked payload message.
+// TODO if the only unacked packet is the CLOSE packet and it didn't have any data...
 if (con.getOutputStream().getClosed() &&
 ( (size > 0) || (con.getUnackedPacketsSent() <= 0) || (packet.getSequenceNum() > 0) ) ) {
 packet.setFlag(Packet.FLAG_CLOSE);
@@ -75,6 +75,7 @@ class ConnectionManager {
 /** Socket timeout for accept() */
 _soTimeout = -1;

+// Stats for this class
 _context.statManager().createRateStat("stream.con.lifetimeMessagesSent", "How many messages do we send on a stream?", "Stream", new long[] { 60*60*1000, 24*60*60*1000 });
 _context.statManager().createRateStat("stream.con.lifetimeMessagesReceived", "How many messages do we receive on a stream?", "Stream", new long[] { 60*60*1000, 24*60*60*1000 });
 _context.statManager().createRateStat("stream.con.lifetimeBytesSent", "How many bytes do we send on a stream?", "Stream", new long[] { 60*60*1000, 24*60*60*1000 });
@@ -85,6 +86,14 @@ class ConnectionManager {
 _context.statManager().createRateStat("stream.con.lifetimeCongestionSeenAt", "When was the last congestion seen at when a stream closes?", "Stream", new long[] { 60*60*1000, 24*60*60*1000 });
 _context.statManager().createRateStat("stream.con.lifetimeSendWindowSize", "What is the final send window size when a stream closes?", "Stream", new long[] { 60*60*1000, 24*60*60*1000 });
 _context.statManager().createRateStat("stream.receiveActive", "How many streams are active when a new one is received (period being not yet dropped)", "Stream", new long[] { 60*60*1000, 24*60*60*1000 });
+// Stats for Connection
+_context.statManager().createRateStat("stream.con.windowSizeAtCongestion", "How large was our send window when we send a dup?", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
+_context.statManager().createRateStat("stream.chokeSizeBegin", "How many messages were outstanding when we started to choke?", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
+_context.statManager().createRateStat("stream.chokeSizeEnd", "How many messages were outstanding when we stopped being choked?", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
+_context.statManager().createRateStat("stream.fastRetransmit", "How long a packet has been around for if it has been resent per the fast retransmit timer?", "Stream", new long[] { 60*1000, 10*60*1000 });
+// Stats for PacketQueue
+_context.statManager().createRateStat("stream.con.sendMessageSize", "Size of a message sent on a connection", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
+_context.statManager().createRateStat("stream.con.sendDuplicateSize", "Size of a message resent on a connection", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
 }

 Connection getConnectionByInboundId(long id) {
@@ -420,13 +429,11 @@ class ConnectionManager {
 if (removed) {
 _context.statManager().addRateData("stream.con.lifetimeMessagesSent", 1+con.getLastSendId(), con.getLifetime());
 MessageInputStream stream = con.getInputStream();
-if (stream != null) {
 long rcvd = 1 + stream.getHighestBlockId();
 long nacks[] = stream.getNacks();
 if (nacks != null)
 rcvd -= nacks.length;
 _context.statManager().addRateData("stream.con.lifetimeMessagesReceived", rcvd, con.getLifetime());
-}
 _context.statManager().addRateData("stream.con.lifetimeBytesSent", con.getLifetimeBytesSent(), con.getLifetime());
 _context.statManager().addRateData("stream.con.lifetimeBytesReceived", con.getLifetimeBytesReceived(), con.getLifetime());
 _context.statManager().addRateData("stream.con.lifetimeDupMessagesSent", con.getLifetimeDupMessagesSent(), con.getLifetime());
@@ -98,15 +98,17 @@ class ConnectionOptions extends I2PSocketOptionsImpl {
 public static final int DEFAULT_INITIAL_ACK_DELAY = 2*1000;
 static final int MIN_WINDOW_SIZE = 1;
 private static final boolean DEFAULT_ANSWER_PINGS = true;
+private static final int DEFAULT_INACTIVITY_TIMEOUT = 90*1000;
+private static final int DEFAULT_INACTIVITY_ACTION = INACTIVITY_ACTION_SEND;


 /**
 * If PROTO is enforced, we cannot communicate with destinations earlier than version 0.7.1.
 * @since 0.9.1
 */
 private static final boolean DEFAULT_ENFORCE_PROTO = false;

-// Syncronization fix, but doing it this way causes NPE...
-// FIXME private final int _trend[] = new int[TREND_COUNT]; FIXME
-private int _trend[];
+private final int _trend[] = new int[TREND_COUNT];

 /**
 * OK, here is the calculation on the message size to fit in a single
@@ -220,6 +222,7 @@ class ConnectionOptions extends I2PSocketOptionsImpl {
 */
 public ConnectionOptions() {
 super();
+cinit(System.getProperties());
 }

 /**
@@ -229,6 +232,7 @@ class ConnectionOptions extends I2PSocketOptionsImpl {
 */
 public ConnectionOptions(Properties opts) {
 super(opts);
+cinit(opts);
 }

 /**
@@ -237,6 +241,7 @@ class ConnectionOptions extends I2PSocketOptionsImpl {
 */
 public ConnectionOptions(I2PSocketOptions opts) {
 super(opts);
+cinit(System.getProperties());
 }

 /**
@@ -245,6 +250,7 @@ class ConnectionOptions extends I2PSocketOptionsImpl {
 */
 public ConnectionOptions(ConnectionOptions opts) {
 super(opts);
+cinit(System.getProperties());
 if (opts != null)
 update(opts);
 }
@@ -302,10 +308,10 @@ class ConnectionOptions extends I2PSocketOptionsImpl {
 _maxTotalConnsPerDay = opts.getMaxTotalConnsPerDay();
 }

-@Override
-protected void init(Properties opts) {
-super.init(opts);
-_trend = new int[TREND_COUNT];
+/**
+* Initialization
+*/
+private void cinit(Properties opts) {
 setMaxWindowSize(getInt(opts, PROP_MAX_WINDOW_SIZE, Connection.MAX_WINDOW_SIZE));
 setConnectDelay(getInt(opts, PROP_CONNECT_DELAY, -1));
 setProfile(getInt(opts, PROP_PROFILE, PROFILE_BULK));
@@ -318,8 +324,8 @@ class ConnectionOptions extends I2PSocketOptionsImpl {
 setMaxResends(getInt(opts, PROP_MAX_RESENDS, DEFAULT_MAX_SENDS));
 // handled in super()
 //setWriteTimeout(getInt(opts, PROP_WRITE_TIMEOUT, -1));
-setInactivityTimeout(getInt(opts, PROP_INACTIVITY_TIMEOUT, 90*1000));
-setInactivityAction(getInt(opts, PROP_INACTIVITY_ACTION, INACTIVITY_ACTION_SEND));
+setInactivityTimeout(getInt(opts, PROP_INACTIVITY_TIMEOUT, DEFAULT_INACTIVITY_TIMEOUT));
+setInactivityAction(getInt(opts, PROP_INACTIVITY_ACTION, DEFAULT_INACTIVITY_ACTION));
 setInboundBufferSize(getMaxMessageSize() * (Connection.MAX_WINDOW_SIZE + 2));
 setCongestionAvoidanceGrowthRateFactor(getInt(opts, PROP_CONGESTION_AVOIDANCE_GROWTH_RATE_FACTOR, 1));
 setSlowStartGrowthRateFactor(getInt(opts, PROP_SLOW_START_GROWTH_RATE_FACTOR, 1));
@@ -367,9 +373,9 @@ class ConnectionOptions extends I2PSocketOptionsImpl {
 //if (opts.containsKey(PROP_WRITE_TIMEOUT))
 // setWriteTimeout(getInt(opts, PROP_WRITE_TIMEOUT, -1));
 if (opts.containsKey(PROP_INACTIVITY_TIMEOUT))
-setInactivityTimeout(getInt(opts, PROP_INACTIVITY_TIMEOUT, 90*1000));
+setInactivityTimeout(getInt(opts, PROP_INACTIVITY_TIMEOUT, DEFAULT_INACTIVITY_TIMEOUT));
 if (opts.containsKey(PROP_INACTIVITY_ACTION))
-setInactivityAction(getInt(opts, PROP_INACTIVITY_ACTION, INACTIVITY_ACTION_SEND));
+setInactivityAction(getInt(opts, PROP_INACTIVITY_ACTION, DEFAULT_INACTIVITY_ACTION));
 setInboundBufferSize(getMaxMessageSize() * (Connection.MAX_WINDOW_SIZE + 2));
 if (opts.contains(PROP_CONGESTION_AVOIDANCE_GROWTH_RATE_FACTOR))
 setCongestionAvoidanceGrowthRateFactor(getInt(opts, PROP_CONGESTION_AVOIDANCE_GROWTH_RATE_FACTOR, 2));
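The ConnectionOptions hunks above replace an overridden init(Properties) - which the superclass constructor invoked before subclass fields such as _trend existed - with a private cinit(Properties) called from each constructor after super(). A small sketch of the underlying Java pitfall follows; it is not from the patch and the class names are hypothetical.

    // Illustrative sketch only - hypothetical classes showing why the old
    // @Override init() pattern NPE'd and why cinit() after super() does not.
    class Base {
        Base() { init(); }                    // runs before any subclass field initializers
        protected void init() { }
    }

    class SubBroken extends Base {
        private final int[] trend = new int[3];
        @Override protected void init() { trend[0] = 1; }   // trend is still null here -> NPE
    }

    class SubFixed extends Base {
        private final int[] trend = new int[3];
        SubFixed() { cinit(); }               // private init, called once fields exist
        private void cinit() { trend[0] = 1; }
    }

    public class InitOrderDemo {
        public static void main(String[] args) {
            new SubFixed();                   // fine
            try {
                new SubBroken();
            } catch (NullPointerException expected) {
                System.out.println("overridden init() ran before subclass fields were set");
            }
        }
    }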
@@ -12,7 +12,13 @@ import net.i2p.util.SimpleTimer;
 /**
 * Receive a packet for a particular connection - placing the data onto the
 * queue, marking packets as acked, updating various fields, etc.
-*
+*<p>
+* I2PSession -> MessageHandler -> PacketHandler -> ConnectionPacketHandler -> MessageInputStream
+*<p>
+* One of these is instantiated per-Destination
+* (i.e. per-ConnectionManager, not per-Connection).
+* It doesn't store any state.

 */
 class ConnectionPacketHandler {
 private final I2PAppContext _context;
@@ -94,19 +100,24 @@ class ConnectionPacketHandler {
 }
 }

-long ready = con.getInputStream().getHighestReadyBockId();
-int available = con.getOptions().getInboundBufferSize() - con.getInputStream().getTotalReadySize();
-int allowedBlocks = available/con.getOptions().getMaxMessageSize();
-if ( (packet.getPayloadSize() > 0) && (packet.getSequenceNum() > ready + allowedBlocks) ) {
-if (_log.shouldLog(Log.WARN))
-_log.warn("Inbound buffer exceeded on connection " + con + " ("
-+ ready + "/"+ (ready+allowedBlocks) + "/" + available
-+ ": dropping " + packet);
-ack(con, packet.getAckThrough(), packet.getNacks(), null, false, choke);
-con.getOptions().setChoke(61*1000);
-packet.releasePayload();
-con.ackImmediately();
-return;
+if (packet.getPayloadSize() > 0) {
+// Here, for the purposes of calculating whether the input stream is full,
+// we assume all the not-ready blocks are the max message size.
+// This prevents us from getting DoSed by accepting unlimited out-of-order small messages
+long ready = con.getInputStream().getHighestReadyBockId();
+int available = con.getOptions().getInboundBufferSize() - con.getInputStream().getTotalReadySize();
+int allowedBlocks = available/con.getOptions().getMaxMessageSize();
+if (packet.getSequenceNum() > ready + allowedBlocks) {
+if (_log.shouldLog(Log.WARN))
+_log.warn("Inbound buffer exceeded on connection " + con + " ("
++ ready + "/"+ (ready+allowedBlocks) + "/" + available
++ ": dropping " + packet);
+ack(con, packet.getAckThrough(), packet.getNacks(), null, false, choke);
+con.getOptions().setChoke(61*1000);
+packet.releasePayload();
+con.ackImmediately();
+return;
+}
 }
 con.getOptions().setChoke(0);

@@ -513,12 +524,14 @@ class ConnectionPacketHandler {
 }

 private class AckDup implements SimpleTimer.TimedEvent {
-private long _created;
-private Connection _con;
+private final long _created;
+private final Connection _con;

 public AckDup(Connection con) {
 _created = _context.clock().now();
 _con = con;
 }

 public void timeReached() {
 if (_con.getLastSendTime() <= _created) {
 if (_con.getResetReceived() || _con.getResetSent()) {
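The receivePacket() hunk above applies the inbound-buffer limit only to packets that actually carry payload, and sizes the allowed out-of-order window by dividing the remaining buffer by the maximum message size, so a flood of tiny out-of-order packets cannot claim unbounded memory. A standalone sketch of that calculation, with hypothetical names, is below.

    // Illustrative sketch only - hypothetical helper restating the buffer check above.
    final class InboundBufferGuard {
        /**
         * Treat every not-yet-ready block as a full-size message and refuse payload
         * packets whose sequence number lies beyond what the remaining buffer could hold.
         */
        static boolean shouldDrop(int payloadSize, long seqNum, long highestReady,
                                  int inboundBufferSize, int totalReadySize, int maxMessageSize) {
            if (payloadSize <= 0)
                return false;                               // pure ACKs are never dropped here
            int available = inboundBufferSize - totalReadySize;
            int allowedBlocks = available / maxMessageSize;
            return seqNum > highestReady + allowedBlocks;
        }

        public static void main(String[] args) {
            // 64 KB buffer, 40 KB already queued, ~1730-byte messages -> 14 blocks of headroom
            System.out.println(shouldDrop(100, 18, 5, 64 * 1024, 40 * 1024, 1730)); // false
            System.out.println(shouldDrop(100, 25, 5, 64 * 1024, 40 * 1024, 1730)); // true: drop and choke
        }
    }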
@@ -13,7 +13,8 @@ import net.i2p.util.Log;
 /**
 * Receive raw information from the I2PSession and turn it into
 * Packets, if we can.
-*
+*<p>
+* I2PSession -> MessageHandler -> PacketHandler -> ConnectionPacketHandler -> MessageInputStream
 */
 class MessageHandler implements I2PSessionMuxedListener {
 private final ConnectionManager _manager;
@@ -16,6 +16,11 @@ import net.i2p.util.Log;
 /**
 * Stream that can be given messages out of order
 * yet present them in order.
+*<p>
+* I2PSession -> MessageHandler -> PacketHandler -> ConnectionPacketHandler -> MessageInputStream
+*<p>
+* This buffers unlimited data via messageReceived() -
+* limiting / blocking is done in ConnectionPacketHandler.receivePacket().
 *
 */
 class MessageInputStream extends InputStream {
@@ -113,6 +118,9 @@ class MessageInputStream extends InputStream {
 }
 }

+/**
+* Adds the ack-through and nack fields to a packet we are building for transmission
+*/
 public void updateAcks(PacketLocal packet) {
 synchronized (_dataLock) {
 packet.setAckThrough(_highestBlockId);
@@ -126,6 +134,7 @@ class MessageInputStream extends InputStream {
 *
 * @return block IDs greater than the highest ready block ID, or null if there aren't any.
 */
+/***
 public long[] getOutOfOrderBlocks() {
 long blocks[] = null;
 synchronized (_dataLock) {
@@ -140,15 +149,18 @@ class MessageInputStream extends InputStream {
 Arrays.sort(blocks);
 return blocks;
 }
+***/

 /** how many blocks have we received that we still have holes before?
 * @return Count of blocks received that still have holes
 */
+/***
 public int getOutOfOrderBlockCount() {
 synchronized (_dataLock) {
 return _notYetReadyBlocks.size();
 }
 }
+***/

 /**
 * how long a read() call should block (if less than 0, block indefinitely,
@@ -157,8 +169,8 @@ class MessageInputStream extends InputStream {
 */
 public int getReadTimeout() { return _readTimeout; }
 public void setReadTimeout(int timeout) {
-if (_log.shouldLog(Log.INFO))
-_log.info("Changing read timeout from " + _readTimeout + " to " + timeout);
+if (_log.shouldLog(Log.DEBUG))
+_log.debug("Changing read timeout from " + _readTimeout + " to " + timeout);
 _readTimeout = timeout;
 }

@@ -205,9 +217,9 @@ class MessageInputStream extends InputStream {
 * @return true if this is a new packet, false if it is a dup
 */
 public boolean messageReceived(long messageId, ByteArray payload) {
+if (_log.shouldLog(Log.DEBUG))
+_log.debug("received " + messageId + " with " + (payload != null ? payload.getValid()+"" : "no payload"));
 synchronized (_dataLock) {
-if (_log.shouldLog(Log.DEBUG))
-_log.debug("received " + messageId + " with " + (payload != null ? payload.getValid()+"" : "no payload"));
 if (messageId <= _highestReadyBlockId) {
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("ignoring dup message " + messageId);
@@ -237,7 +249,6 @@ class MessageInputStream extends InputStream {
 cur++;
 _highestReadyBlockId++;
 }
-_dataLock.notifyAll();
 } else {
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("message is out of order: " + messageId);
@@ -245,8 +256,8 @@ class MessageInputStream extends InputStream {
 _notYetReadyBlocks.put(Long.valueOf(messageId), new ByteArray(null));
 else
 _notYetReadyBlocks.put(Long.valueOf(messageId), payload);
-_dataLock.notifyAll();
 }
+_dataLock.notifyAll();
 }
 return true;
 }
@@ -278,7 +289,7 @@ class MessageInputStream extends InputStream {

 while (_readyDataBlocks.isEmpty()) {
 if (_locallyClosed)
-throw new IOException("Already closed, you wanker");
+throw new IOException("Already closed");

 if ( (_notYetReadyBlocks.isEmpty()) && (_closeReceived) ) {
 if (_log.shouldLog(Log.INFO))
@@ -360,7 +371,7 @@ class MessageInputStream extends InputStream {

 @Override
 public int available() throws IOException {
-if (_locallyClosed) throw new IOException("Already closed, you wanker");
+if (_locallyClosed) throw new IOException("Already closed");
 throwAnyError();
 int numBytes = 0;
 synchronized (_dataLock) {
@@ -373,7 +384,7 @@ class MessageInputStream extends InputStream {
 }
 }
 if (_log.shouldLog(Log.DEBUG))
-_log.debug("available(): " + numBytes + " " + toString());
+_log.debug("available(): " + numBytes);

 return numBytes;
 }
@@ -384,6 +395,7 @@ class MessageInputStream extends InputStream {
 *
 * @return Count of bytes waiting to be read
 */
+/***
 public int getTotalQueuedSize() {
 synchronized (_dataLock) {
 if (_locallyClosed) return 0;
@@ -401,7 +413,11 @@ class MessageInputStream extends InputStream {
 return numBytes;
 }
 }
+***/

+/**
+* Same as available() but doesn't throw IOE
+*/
 public int getTotalReadySize() {
 synchronized (_dataLock) {
 if (_locallyClosed) return 0;
@@ -14,6 +14,8 @@ import net.i2p.util.SimpleTimer2;
 * A stream that we can shove data into that fires off those bytes
 * on flush or when the buffer is full. It also blocks according
 * to the data receiver's needs.
+*<p>
+* MessageOutputStream -> ConnectionDataReceiver -> Connection -> PacketQueue -> I2PSession
 */
 class MessageOutputStream extends OutputStream {
 private final I2PAppContext _context;
@@ -21,17 +23,17 @@ class MessageOutputStream extends OutputStream {
 private byte _buf[];
 private int _valid;
 private final Object _dataLock;
-private DataReceiver _dataReceiver;
+private final DataReceiver _dataReceiver;
 private IOException _streamError;
-private boolean _closed;
+private volatile boolean _closed;
 private long _written;
 private int _writeTimeout;
 private ByteCache _dataCache;
 private final Flusher _flusher;
 private long _lastFlushed;
-private long _lastBuffered;
+private volatile long _lastBuffered;
 /** if we enqueue data but don't flush it in this period, flush it passively */
-private int _passiveFlushDelay;
+private final int _passiveFlushDelay;
 /**
 * if we are changing the buffer size during operation, set this to the new
 * buffer size, and next time we are flushing, update the _buf array to the new
@@ -39,9 +41,9 @@ class MessageOutputStream extends OutputStream {
 */
 private volatile int _nextBufferSize;
 // rate calc helpers
-private long _sendPeriodBeginTime;
-private long _sendPeriodBytes;
-private int _sendBps;
+//private long _sendPeriodBeginTime;
+//private long _sendPeriodBytes;
+//private int _sendBps;

 /**
 * Since this is less than i2ptunnel's i2p.streaming.connectDelay default of 1000,
@@ -73,16 +75,16 @@ class MessageOutputStream extends OutputStream {
 _writeTimeout = -1;
 _passiveFlushDelay = passiveFlushDelay;
 _nextBufferSize = -1;
-_sendPeriodBeginTime = ctx.clock().now();
-_context.statManager().createRateStat("stream.sendBps", "How fast we pump data through the stream", "Stream", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
+//_sendPeriodBeginTime = ctx.clock().now();
+//_context.statManager().createRateStat("stream.sendBps", "How fast we pump data through the stream", "Stream", new long[] { 60*1000, 5*60*1000, 60*60*1000 });
 _flusher = new Flusher(timer);
-if (_log.shouldLog(Log.DEBUG))
-_log.debug("MessageOutputStream created");
+//if (_log.shouldLog(Log.DEBUG))
+// _log.debug("MessageOutputStream created");
 }

 public void setWriteTimeout(int ms) {
-if (_log.shouldLog(Log.INFO))
-_log.info("Changing write timeout from " + _writeTimeout + " to " + ms);
+if (_log.shouldLog(Log.DEBUG))
+_log.debug("Changing write timeout from " + _writeTimeout + " to " + ms);

 _writeTimeout = ms;
 }
@@ -131,15 +133,9 @@ class MessageOutputStream extends OutputStream {
 remaining -= toWrite;
 cur += toWrite;
 _valid = _buf.length;
-// avoid NPE from race with destroy()
-DataReceiver rcvr = _dataReceiver;
-if (rcvr == null) {
-throwAnyError();
-return;
-}
 if (_log.shouldLog(Log.INFO))
 _log.info("write() direct valid = " + _valid);
-ws = rcvr.writeData(_buf, 0, _valid);
+ws = _dataReceiver.writeData(_buf, 0, _valid);
 _written += _valid;
 _valid = 0;
 throwAnyError();
@@ -167,17 +163,18 @@ class MessageOutputStream extends OutputStream {
 _log.info("After waitForAccept of " + ws);
 }
 } else {
-if (_log.shouldLog(Log.INFO))
-_log.info("Queued " + len + " without sending to the receiver");
+if (_log.shouldLog(Log.DEBUG))
+_log.debug("Queued " + len + " without sending to the receiver");
 }
 }
 long elapsed = _context.clock().now() - begin;
 if ( (elapsed > 10*1000) && (_log.shouldLog(Log.INFO)) )
 _log.info("wtf, took " + elapsed + "ms to write to the stream?", new Exception("foo"));
 throwAnyError();
-updateBps(len);
+//updateBps(len);
 }

+/****
 private void updateBps(int len) {
 long now = _context.clock().now();
 int periods = (int)Math.floor((now - _sendPeriodBeginTime) / 1000d);
@@ -191,7 +188,9 @@ class MessageOutputStream extends OutputStream {
 _sendPeriodBytes += len;
 }
 }
+****/

+/** */
 public void write(int b) throws IOException {
 write(new byte[] { (byte)b }, 0, 1);
 throwAnyError();
@@ -240,14 +239,15 @@ class MessageOutputStream extends OutputStream {
 _enqueued = true;
 }
 public void timeReached() {
+if (_closed)
+return;
 _enqueued = false;
-DataReceiver rec = _dataReceiver;
 long timeLeft = (_lastBuffered + _passiveFlushDelay - _context.clock().now());
 if (_log.shouldLog(Log.DEBUG))
 _log.debug("flusher time reached: left = " + timeLeft);
 if (timeLeft > 0)
 enqueue();
-else if ( (rec != null) && (rec.writeInProcess()) )
+else if (_dataReceiver.writeInProcess())
 enqueue(); // don't passive flush if there is a write being done (unacked outbound)
 else
 doFlush();
@@ -261,10 +261,8 @@ class MessageOutputStream extends OutputStream {
 if ( (_valid > 0) && (flushTime <= _context.clock().now()) ) {
 if (_log.shouldLog(Log.INFO))
 _log.info("doFlush() valid = " + _valid);
-// avoid NPE from race with destroy()
-DataReceiver rcvr = _dataReceiver;
-if ( (_buf != null) && (rcvr != null) ) {
-ws = rcvr.writeData(_buf, 0, _valid);
+if (_buf != null) {
+ws = _dataReceiver.writeData(_buf, 0, _valid);
 _written += _valid;
 _valid = 0;
 _lastFlushed = _context.clock().now();
@@ -317,25 +315,18 @@ class MessageOutputStream extends OutputStream {
 if (_log.shouldLog(Log.INFO) && _valid > 0)
 _log.info("flush() valid = " + _valid);

-// avoid NPE from race with destroy()
-DataReceiver rcvr = _dataReceiver;
 synchronized (_dataLock) {
 if (_buf == null) {
 _dataLock.notifyAll();
 throw new IOException("closed (buffer went away)");
 }

-if (rcvr == null) {
-_dataLock.notifyAll();
-throwAnyError();
-return;
-}
 // if valid == 0 return ??? - no, this could flush a CLOSE packet too.

 // Yes, flush here, inside the data lock, and do all the waitForCompletion() stuff below
 // (disabled)
 if (!wait_for_accept_only) {
-ws = rcvr.writeData(_buf, 0, _valid);
+ws = _dataReceiver.writeData(_buf, 0, _valid);
 _written += _valid;
 _valid = 0;
 locked_updateBufferSize();
@@ -347,7 +338,7 @@ class MessageOutputStream extends OutputStream {
 // Skip all the waitForCompletion() stuff below, which is insanity, as of 0.8.1
 // must do this outside the data lock
 if (wait_for_accept_only) {
-flushAvailable(rcvr, true);
+flushAvailable(_dataReceiver, true);
 return;
 }

@@ -387,6 +378,7 @@ class MessageOutputStream extends OutputStream {
 }
 // setting _closed before flush() will force flush() to send a CLOSE packet
 _closed = true;
+_flusher.cancel();

 // In 0.8.1 we rewrote flush() to only wait for accept into the window,
 // not "completion" (i.e. ack from the far end).
@@ -415,10 +407,11 @@ class MessageOutputStream extends OutputStream {

 /**
 * nonblocking close -
-* Use outside of this package is deprecated, should be made package local
+* Only for use inside package
 */
 public void closeInternal() {
 _closed = true;
+_flusher.cancel();
 if (_streamError == null)
 _streamError = new IOException("Closed internally");
 clearData(true);
@@ -429,12 +422,10 @@ class MessageOutputStream extends OutputStream {
 if (_log.shouldLog(Log.INFO) && _valid > 0)
 _log.info("clearData() valid = " + _valid);

-// avoid NPE from race with destroy()
-DataReceiver rcvr = _dataReceiver;
 synchronized (_dataLock) {
 // flush any data, but don't wait for it
-if ( (rcvr != null) && (_valid > 0) && shouldFlush)
-rcvr.writeData(_buf, 0, _valid);
+if (_valid > 0 && shouldFlush)
+_dataReceiver.writeData(_buf, 0, _valid);
 _written += _valid;
 _valid = 0;

@@ -503,15 +494,15 @@ class MessageOutputStream extends OutputStream {
 throw new InterruptedIOException("Flush available timed out (" + _writeTimeout + "ms)");
 }
 long afterAccept = System.currentTimeMillis();
-if ( (afterAccept - afterBuild > 1000) && (_log.shouldLog(Log.DEBUG)) )
-_log.debug("Took " + (afterAccept-afterBuild) + "ms to accept a packet? " + ws);
+if ( (afterAccept - afterBuild > 1000) && (_log.shouldLog(Log.INFO)) )
+_log.info("Took " + (afterAccept-afterBuild) + "ms to accept a packet? " + ws);
 return;
 }

 void destroy() {
-_dataReceiver = null;
+_closed = true;
+_flusher.cancel();
 synchronized (_dataLock) {
-_closed = true;
 _dataLock.notifyAll();
 }
 }
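Several MessageOutputStream hunks above delete the "avoid NPE from race with destroy()" null checks: _dataReceiver becomes final, _closed becomes volatile, and destroy()/close() set the flag and cancel the flusher instead of nulling the receiver. A small sketch of that shutdown pattern follows; it is not from the patch and the names are hypothetical.

    // Illustrative sketch only - hypothetical classes showing the shutdown pattern
    // the hunks above adopt: keep the collaborator final, signal close via a volatile flag.
    class Sender {
        interface Receiver { void write(byte[] buf, int len); }

        private final Receiver receiver;     // never nulled, so no per-call null checks
        private volatile boolean closed;     // visible to the timer/flusher thread

        Sender(Receiver receiver) { this.receiver = receiver; }

        void flush(byte[] buf, int len) {
            if (closed)
                return;                      // destroy() already ran; just drop the work
            receiver.write(buf, len);
        }

        void destroy() {
            closed = true;                   // the old code nulled the receiver and risked NPEs
        }
    }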
@@ -395,6 +395,7 @@ class Packet {
 DataHelper.toLong(buffer, cur, 4, _ackThrough > 0 ? _ackThrough : 0);
 cur += 4;
 if (_nacks != null) {
+// if max win is ever > 255, limit to 255
 DataHelper.toLong(buffer, cur, 1, _nacks.length);
 cur++;
 for (int i = 0; i < _nacks.length; i++) {
@@ -461,7 +462,7 @@ class Packet {
 * @return How large the current packet would be
 * @throws IllegalStateException
 */
-public int writtenSize() throws IllegalStateException {
+private int writtenSize() {
 int size = 0;
 size += 4; // _sendStreamId.length;
 size += 4; // _receiveStreamId.length;
@@ -469,6 +470,7 @@ class Packet {
 size += 4; // ackThrough
 if (_nacks != null) {
 size++; // nacks length
+// if max win is ever > 255, limit to 255
 size += 4 * _nacks.length;
 } else {
 size++; // nacks length
@@ -671,10 +673,11 @@ class Packet {
 buf.append(toId(_sendStreamId));
 //buf.append("<-->");
 buf.append(toId(_receiveStreamId)).append(": #").append(_sequenceNum);
-if (_sequenceNum < 10)
-buf.append(" \t"); // so the tab lines up right
-else
-buf.append('\t');
+//if (_sequenceNum < 10)
+// buf.append(" \t"); // so the tab lines up right
+//else
+// buf.append('\t');
+buf.append(' ');
 buf.append(toFlagString());
 buf.append(" ACK ").append(getAckThrough());
 if (_nacks != null) {
@@ -2,7 +2,6 @@ package net.i2p.client.streaming;

 import java.text.SimpleDateFormat;
 import java.util.Date;
-import java.util.Iterator;
 import java.util.Set;

 import net.i2p.I2PAppContext;
@@ -13,7 +12,8 @@ import net.i2p.util.Log;
 /**
 * receive a packet and dispatch it correctly to the connection specified,
 * the server socket, or queue a reply RST packet.
-*
+*<p>
+* I2PSession -> MessageHandler -> PacketHandler -> ConnectionPacketHandler -> MessageInputStream
 */
 class PacketHandler {
 private final ConnectionManager _manager;
@@ -86,6 +86,7 @@ class PacketHandler {
 }
 *****/

+/** */
 void receivePacket(Packet packet) {
 //boolean ok = choke(packet);
 //if (ok)
@@ -202,15 +203,13 @@ class PacketHandler {
 // someone is sending us a packet on the wrong stream
 // It isn't a SYN so it isn't likely to have a FROM to send a reset back to
 if (_log.shouldLog(Log.ERROR)) {
-Set cons = _manager.listConnections();
 StringBuilder buf = new StringBuilder(512);
 buf.append("Received a packet on the wrong stream: ");
 buf.append(packet);
 buf.append("\nthis connection:\n");
 buf.append(con);
 buf.append("\nall connections:");
-for (Iterator iter = cons.iterator(); iter.hasNext();) {
-Connection cur = (Connection)iter.next();
+for (Connection cur : _manager.listConnections()) {
 buf.append('\n').append(cur);
 }
 _log.error(buf.toString(), new Exception("Wrong stream"));
@@ -299,9 +298,7 @@ class PacketHandler {
 }
 if (_log.shouldLog(Log.DEBUG)) {
 StringBuilder buf = new StringBuilder(128);
-Set cons = _manager.listConnections();
-for (Iterator iter = cons.iterator(); iter.hasNext(); ) {
-Connection con = (Connection)iter.next();
+for (Connection con : _manager.listConnections()) {
 buf.append(con.toString()).append(" ");
 }
 _log.debug("connections: " + buf.toString() + " sendId: "
@@ -16,11 +16,11 @@ class PacketLocal extends Packet implements MessageOutputStream.WriteStatus {
 private final I2PAppContext _context;
 private final Log _log;
 private final Connection _connection;
-private Destination _to;
+private final Destination _to;
 private SessionKey _keyUsed;
 private Set _tagsSent;
 private final long _createdOn;
-private int _numSends;
+private volatile int _numSends;
 private long _lastSend;
 private long _acceptedOn;
 private long _ackOn;
@@ -45,7 +45,6 @@ class PacketLocal extends Packet implements MessageOutputStream.WriteStatus {
 }

 public Destination getTo() { return _to; }
-public void setTo(Destination to) { _to = to; }

 /**
 * @deprecated should always return null
@@ -72,6 +71,7 @@ class PacketLocal extends Packet implements MessageOutputStream.WriteStatus {
 public void setTagsSent(Set tags) {
 if (tags != null && !tags.isEmpty())
 _log.error("Who is sending tags thru the streaming lib? " + tags.size());
+/****
 if ( (_tagsSent != null) && (!_tagsSent.isEmpty()) && (!tags.isEmpty()) ) {
 //int old = _tagsSent.size();
 //_tagsSent.addAll(tags);
@@ -80,6 +80,7 @@ class PacketLocal extends Packet implements MessageOutputStream.WriteStatus {
 } else {
 _tagsSent = tags;
 }
+****/
 }

 public boolean shouldSign() {
@@ -142,10 +143,15 @@ class PacketLocal extends Packet implements MessageOutputStream.WriteStatus {
 /** @return null if not bound */
 public Connection getConnection() { return _connection; }

+/**
+* Will force a fast restransmit on the 3rd call (FAST_RETRANSMIT_THRESHOLD)
+* but only if it's the lowest unacked (see Connection.ResendPacketEvent)
+*/
 public void incrementNACKs() {
 int cnt = ++_nackCount;
 SimpleTimer2.TimedEvent evt = _resendEvent;
-if ( (cnt >= Connection.FAST_RETRANSMIT_THRESHOLD) && (evt != null) && (!_retransmitted)) {
+if (cnt >= Connection.FAST_RETRANSMIT_THRESHOLD && evt != null && (!_retransmitted) &&
+(_numSends == 1 || _lastSend < _context.clock().now() + 4*1000)) { // Don't fast retx if we recently resent it
 _retransmitted = true;
 evt.reschedule(0);
 }
@@ -162,8 +168,11 @@ class PacketLocal extends Packet implements MessageOutputStream.WriteStatus {
 if (con != null)
 buf.append(" rtt ").append(con.getOptions().getRTT());

-if ( (_tagsSent != null) && (!_tagsSent.isEmpty()) )
-buf.append(" with tags");
+//if ( (_tagsSent != null) && (!_tagsSent.isEmpty()) )
+// buf.append(" with tags");

+if (_nackCount > 0)
+buf.append(" nacked ").append(_nackCount).append(" times");

 if (_ackOn > 0)
 buf.append(" ack after ").append(getAckTime());
@@ -200,8 +209,6 @@ class PacketLocal extends Packet implements MessageOutputStream.WriteStatus {
 * @param maxWaitMs MessageOutputStream is the only caller, generally with -1
 */
 public void waitForAccept(int maxWaitMs) {
-if (_connection == null)
-throw new IllegalStateException("Cannot wait for accept with no connection");
 long before = _context.clock().now();
 int queued = _connection.getUnackedPacketsSent();
 int window = _connection.getOptions().getWindowSize();
@@ -216,7 +223,7 @@ class PacketLocal extends Packet implements MessageOutputStream.WriteStatus {
 int afterQueued = _connection.getUnackedPacketsSent();
 if ( (after - before > 1000) && (_log.shouldLog(Log.DEBUG)) )
 _log.debug("Took " + (after-before) + "ms to get "
-+ (accepted ? " accepted" : " rejected")
++ (accepted ? "accepted" : "rejected")
 + (_cancelledOn > 0 ? " and CANCELLED" : "")
|
+ (_cancelledOn > 0 ? " and CANCELLED" : "")
|
||||||
+ ", queued behind " + queued +" with a window size of " + window
|
+ ", queued behind " + queued +" with a window size of " + window
|
||||||
+ ", finally accepted with " + afterQueued + " queued: "
|
+ ", finally accepted with " + afterQueued + " queued: "
|
||||||
|
@ -12,7 +12,8 @@ import net.i2p.util.Log;
|
|||||||
* Well, thats the theory at least... in practice we just
|
* Well, thats the theory at least... in practice we just
|
||||||
* send them immediately with no blocking, since the
|
* send them immediately with no blocking, since the
|
||||||
* mode=bestEffort doesnt block in the SDK.
|
* mode=bestEffort doesnt block in the SDK.
|
||||||
*
|
*<p>
|
||||||
|
* MessageOutputStream -> ConnectionDataReceiver -> Connection -> PacketQueue -> I2PSession
|
||||||
*/
|
*/
|
||||||
class PacketQueue {
|
class PacketQueue {
|
||||||
private final I2PAppContext _context;
|
private final I2PAppContext _context;
|
||||||
@ -26,16 +27,17 @@ class PacketQueue {
|
|||||||
_session = session;
|
_session = session;
|
||||||
_connectionManager = mgr;
|
_connectionManager = mgr;
|
||||||
_log = context.logManager().getLog(PacketQueue.class);
|
_log = context.logManager().getLog(PacketQueue.class);
|
||||||
_context.statManager().createRateStat("stream.con.sendMessageSize", "Size of a message sent on a connection", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
|
// all createRateStats in ConnectionManager
|
||||||
_context.statManager().createRateStat("stream.con.sendDuplicateSize", "Size of a message resent on a connection", "Stream", new long[] { 60*1000, 10*60*1000, 60*60*1000 });
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Add a new packet to be sent out ASAP
|
* Add a new packet to be sent out ASAP
|
||||||
*
|
*
|
||||||
* keys and tags disabled since dropped in I2PSession
|
* keys and tags disabled since dropped in I2PSession
|
||||||
|
* @return true if sent
|
||||||
*/
|
*/
|
||||||
public void enqueue(PacketLocal packet) {
|
public boolean enqueue(PacketLocal packet) {
|
||||||
|
// this updates the ack/nack field
|
||||||
packet.prepare();
|
packet.prepare();
|
||||||
|
|
||||||
//SessionKey keyUsed = packet.getKeyUsed();
|
//SessionKey keyUsed = packet.getKeyUsed();
|
||||||
@ -52,7 +54,7 @@ class PacketQueue {
|
|||||||
if (packet.getAckTime() > 0) {
|
if (packet.getAckTime() > 0) {
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug("Not resending " + packet);
|
_log.debug("Not resending " + packet);
|
||||||
return;
|
return false;
|
||||||
} else {
|
} else {
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug("Sending... " + packet);
|
_log.debug("Sending... " + packet);
|
||||||
@ -76,7 +78,7 @@ class PacketQueue {
|
|||||||
_log.warn("took " + writeTime + "ms to write the packet: " + packet);
|
_log.warn("took " + writeTime + "ms to write the packet: " + packet);
|
||||||
|
|
||||||
// last chance to short circuit...
|
// last chance to short circuit...
|
||||||
if (packet.getAckTime() > 0) return;
|
if (packet.getAckTime() > 0) return false;
|
||||||
|
|
||||||
// this should not block!
|
// this should not block!
|
||||||
begin = _context.clock().now();
|
begin = _context.clock().now();
|
||||||
@ -158,6 +160,7 @@ class PacketQueue {
|
|||||||
// reset
|
// reset
|
||||||
packet.releasePayload();
|
packet.releasePayload();
|
||||||
}
|
}
|
||||||
|
return sent;
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
|
@ -15,7 +15,7 @@ class SchedulerChooser {
|
|||||||
private final Log _log;
|
private final Log _log;
|
||||||
private final TaskScheduler _nullScheduler;
|
private final TaskScheduler _nullScheduler;
|
||||||
/** list of TaskScheduler objects */
|
/** list of TaskScheduler objects */
|
||||||
private final List _schedulers;
|
private final List<TaskScheduler> _schedulers;
|
||||||
|
|
||||||
public SchedulerChooser(I2PAppContext context) {
|
public SchedulerChooser(I2PAppContext context) {
|
||||||
_context = context;
|
_context = context;
|
||||||
@ -26,7 +26,7 @@ class SchedulerChooser {
|
|||||||
|
|
||||||
public TaskScheduler getScheduler(Connection con) {
|
public TaskScheduler getScheduler(Connection con) {
|
||||||
for (int i = 0; i < _schedulers.size(); i++) {
|
for (int i = 0; i < _schedulers.size(); i++) {
|
||||||
TaskScheduler scheduler = (TaskScheduler)_schedulers.get(i);
|
TaskScheduler scheduler = _schedulers.get(i);
|
||||||
if (scheduler.accept(con)) {
|
if (scheduler.accept(con)) {
|
||||||
//if (_log.shouldLog(Log.DEBUG))
|
//if (_log.shouldLog(Log.DEBUG))
|
||||||
// _log.debug("Scheduling for " + con + " with " + scheduler.getClass().getName());
|
// _log.debug("Scheduling for " + con + " with " + scheduler.getClass().getName());
|
||||||
@ -50,11 +50,7 @@ class SchedulerChooser {
|
|||||||
}
|
}
|
||||||
|
|
||||||
private class NullScheduler implements TaskScheduler {
|
private class NullScheduler implements TaskScheduler {
|
||||||
private final Log _log;
|
|
||||||
public NullScheduler() {
|
|
||||||
_log = _context.logManager().getLog(NullScheduler.class);
|
|
||||||
}
|
|
||||||
|
|
||||||
public void eventOccurred(Connection con) {
|
public void eventOccurred(Connection con) {
|
||||||
if (_log.shouldLog(Log.WARN))
|
if (_log.shouldLog(Log.WARN))
|
||||||
_log.warn("Yell at jrandom: Event occurred on " + con, new Exception("source"));
|
_log.warn("Yell at jrandom: Event occurred on " + con, new Exception("source"));
|
||||||
|
@ -45,18 +45,25 @@ class SchedulerClosing extends SchedulerImpl {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public void eventOccurred(Connection con) {
|
public void eventOccurred(Connection con) {
|
||||||
if (con.getNextSendTime() <= 0)
|
long nextSend = con.getNextSendTime();
|
||||||
con.setNextSendTime(_context.clock().now() + con.getOptions().getSendAckDelay());
|
long now = _context.clock().now();
|
||||||
long remaining = con.getNextSendTime() - _context.clock().now();
|
long remaining;
|
||||||
|
if (nextSend <= 0) {
|
||||||
|
remaining = con.getOptions().getSendAckDelay();
|
||||||
|
nextSend = now + remaining;
|
||||||
|
con.setNextSendTime(nextSend);
|
||||||
|
} else {
|
||||||
|
remaining = nextSend - now;
|
||||||
|
}
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug("Event occurred w/ remaining: " + remaining + " on " + con);
|
_log.debug("Event occurred w/ remaining: " + remaining + " on " + con);
|
||||||
if (remaining <= 0) {
|
if (remaining <= 0) {
|
||||||
if (con.getCloseSentOn() <= 0) {
|
if (con.getCloseSentOn() <= 0) {
|
||||||
con.sendAvailable();
|
con.sendAvailable();
|
||||||
con.setNextSendTime(_context.clock().now() + con.getOptions().getSendAckDelay());
|
|
||||||
} else {
|
} else {
|
||||||
con.ackImmediately();
|
//con.ackImmediately();
|
||||||
}
|
}
|
||||||
|
con.setNextSendTime(now + con.getOptions().getSendAckDelay());
|
||||||
} else {
|
} else {
|
||||||
//if (remaining < 5*1000)
|
//if (remaining < 5*1000)
|
||||||
// remaining = 5*1000;
|
// remaining = 5*1000;
|
||||||
|
@ -19,4 +19,9 @@ abstract class SchedulerImpl implements TaskScheduler {
|
|||||||
protected void reschedule(long msToWait, Connection con) {
|
protected void reschedule(long msToWait, Connection con) {
|
||||||
SimpleScheduler.getInstance().addEvent(con.getConnectionEvent(), msToWait);
|
SimpleScheduler.getInstance().addEvent(con.getConnectionEvent(), msToWait);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@Override
|
||||||
|
public String toString() {
|
||||||
|
return getClass().getSimpleName();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
@ -130,8 +130,8 @@ class TCBShare {
|
|||||||
super(timer);
|
super(timer);
|
||||||
}
|
}
|
||||||
public void timeReached() {
|
public void timeReached() {
|
||||||
for (Iterator iter = _cache.keySet().iterator(); iter.hasNext(); ) {
|
for (Iterator<Entry> iter = _cache.values().iterator(); iter.hasNext(); ) {
|
||||||
if (_cache.get(iter.next()).isExpired())
|
if (iter.next().isExpired())
|
||||||
iter.remove();
|
iter.remove();
|
||||||
}
|
}
|
||||||
schedule(CLEAN_TIME);
|
schedule(CLEAN_TIME);
|
||||||
|
@ -584,7 +584,7 @@ abstract class I2PSessionImpl implements I2PSession, I2CPMessageReader.I2CPMessa
|
|||||||
} else {
|
} else {
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug(getPrefix() + "Message received of type " + message.getType()
|
_log.debug(getPrefix() + "Message received of type " + message.getType()
|
||||||
+ " to be handled by " + handler);
|
+ " to be handled by " + handler.getClass().getSimpleName());
|
||||||
handler.handleMessage(message, this);
|
handler.handleMessage(message, this);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -61,7 +61,8 @@ public class ElGamalAESEngine {
|
|||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Decrypt the message using the given private key using tags from the default key manager.
|
* Decrypt the message using the given private key using tags from the default key manager,
|
||||||
|
* which is the router's key manager. Use extreme care if you aren't the router.
|
||||||
*
|
*
|
||||||
* @deprecated specify the key manager!
|
* @deprecated specify the key manager!
|
||||||
*/
|
*/
|
||||||
@ -75,6 +76,10 @@ public class ElGamalAESEngine {
|
|||||||
* This works according to the
|
* This works according to the
|
||||||
* ElGamal+AES algorithm in the data structure spec.
|
* ElGamal+AES algorithm in the data structure spec.
|
||||||
*
|
*
|
||||||
|
* Warning - use the correct SessionKeyManager. Clients should instantiate their own.
|
||||||
|
* Clients using I2PAppContext.sessionKeyManager() may be correlated with the router,
|
||||||
|
* unless you are careful to use different keys.
|
||||||
|
*
|
||||||
* @return decrypted data or null on failure
|
* @return decrypted data or null on failure
|
||||||
*/
|
*/
|
||||||
public byte[] decrypt(byte data[], PrivateKey targetPrivateKey, SessionKeyManager keyManager) throws DataFormatException {
|
public byte[] decrypt(byte data[], PrivateKey targetPrivateKey, SessionKeyManager keyManager) throws DataFormatException {
|
||||||
@ -100,7 +105,7 @@ public class ElGamalAESEngine {
|
|||||||
//if (_log.shouldLog(Log.DEBUG)) _log.debug("Key is known for tag " + st);
|
//if (_log.shouldLog(Log.DEBUG)) _log.debug("Key is known for tag " + st);
|
||||||
long id = _context.random().nextLong();
|
long id = _context.random().nextLong();
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug(id + ": Decrypting existing session encrypted with tag: " + st.toString() + ": key: " + key.toBase64() + ": " + data.length + " bytes: " + Base64.encode(data, 0, 64));
|
_log.debug(id + ": Decrypting existing session encrypted with tag: " + st.toString() + ": key: " + key.toBase64() + ": " + data.length + " bytes " /* + Base64.encode(data, 0, 64) */ );
|
||||||
|
|
||||||
decrypted = decryptExistingSession(data, key, targetPrivateKey, foundTags, usedKey, foundKey);
|
decrypted = decryptExistingSession(data, key, targetPrivateKey, foundTags, usedKey, foundKey);
|
||||||
if (decrypted != null) {
|
if (decrypted != null) {
|
||||||
@ -410,7 +415,7 @@ public class ElGamalAESEngine {
|
|||||||
_context.statManager().updateFrequency("crypto.elGamalAES.encryptExistingSession");
|
_context.statManager().updateFrequency("crypto.elGamalAES.encryptExistingSession");
|
||||||
byte rv[] = encryptExistingSession(data, target, key, tagsForDelivery, currentTag, newKey, paddedSize);
|
byte rv[] = encryptExistingSession(data, target, key, tagsForDelivery, currentTag, newKey, paddedSize);
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug("Existing session encrypted with tag: " + currentTag.toString() + ": " + rv.length + " bytes and key: " + key.toBase64() + ": " + Base64.encode(rv, 0, 64));
|
_log.debug("Existing session encrypted with tag: " + currentTag.toString() + ": " + rv.length + " bytes and key: " + key.toBase64() /* + ": " + Base64.encode(rv, 0, 64) */);
|
||||||
return rv;
|
return rv;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -599,7 +604,6 @@ public class ElGamalAESEngine {
|
|||||||
//_log.debug("Encrypting AES");
|
//_log.debug("Encrypting AES");
|
||||||
if (tagsForDelivery == null) tagsForDelivery = Collections.EMPTY_SET;
|
if (tagsForDelivery == null) tagsForDelivery = Collections.EMPTY_SET;
|
||||||
int size = 2 // sizeof(tags)
|
int size = 2 // sizeof(tags)
|
||||||
+ tagsForDelivery.size()
|
|
||||||
+ SessionTag.BYTE_LENGTH*tagsForDelivery.size()
|
+ SessionTag.BYTE_LENGTH*tagsForDelivery.size()
|
||||||
+ 4 // payload length
|
+ 4 // payload length
|
||||||
+ Hash.HASH_LENGTH
|
+ Hash.HASH_LENGTH
|
||||||
|
@ -59,7 +59,8 @@ public class SessionKeyManager {
|
|||||||
* Associate a new session key with the specified target. Metrics to determine
|
* Associate a new session key with the specified target. Metrics to determine
|
||||||
* when to expire that key begin with this call.
|
* when to expire that key begin with this call.
|
||||||
*
|
*
|
||||||
* @deprecated racy
|
* Racy if called after getCurrentKey() to check for a current session;
|
||||||
|
* use getCurrentOrNewKey() in that case.
|
||||||
*/
|
*/
|
||||||
public void createSession(PublicKey target, SessionKey key) { // nop
|
public void createSession(PublicKey target, SessionKey key) { // nop
|
||||||
}
|
}
|
||||||
@ -67,7 +68,8 @@ public class SessionKeyManager {
|
|||||||
/**
|
/**
|
||||||
* Generate a new session key and associate it with the specified target.
|
* Generate a new session key and associate it with the specified target.
|
||||||
*
|
*
|
||||||
* @deprecated racy
|
* Racy if called after getCurrentKey() to check for a current session;
|
||||||
|
* use getCurrentOrNewKey() in that case.
|
||||||
*/
|
*/
|
||||||
public SessionKey createSession(PublicKey target) {
|
public SessionKey createSession(PublicKey target) {
|
||||||
SessionKey key = KeyGenerator.getInstance().generateSessionKey();
|
SessionKey key = KeyGenerator.getInstance().generateSessionKey();
|
||||||
|
@ -243,7 +243,8 @@ public class TransientSessionKeyManager extends SessionKeyManager {
|
|||||||
* Associate a new session key with the specified target. Metrics to determine
|
* Associate a new session key with the specified target. Metrics to determine
|
||||||
* when to expire that key begin with this call.
|
* when to expire that key begin with this call.
|
||||||
*
|
*
|
||||||
* @deprecated racy
|
* Racy if called after getCurrentKey() to check for a current session;
|
||||||
|
* use getCurrentOrNewKey() in that case.
|
||||||
*/
|
*/
|
||||||
@Override
|
@Override
|
||||||
public void createSession(PublicKey target, SessionKey key) {
|
public void createSession(PublicKey target, SessionKey key) {
|
||||||
|
@ -458,19 +458,32 @@ public class EepGet {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public void stopFetching() { _keepFetching = false; }
|
public void stopFetching() { _keepFetching = false; }
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Blocking fetch, returning true if the URL was retrieved, false if all retries failed
|
* Blocking fetch, returning true if the URL was retrieved, false if all retries failed.
|
||||||
*
|
*
|
||||||
|
* Header timeout default 45 sec, total timeout default none, inactivity timeout default 60 sec.
|
||||||
*/
|
*/
|
||||||
public boolean fetch() { return fetch(_fetchHeaderTimeout); }
|
public boolean fetch() { return fetch(_fetchHeaderTimeout); }
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Blocking fetch, timing out individual attempts if the HTTP response headers
|
* Blocking fetch, timing out individual attempts if the HTTP response headers
|
||||||
* don't come back in the time given. If the timeout is zero or less, this will
|
* don't come back in the time given. If the timeout is zero or less, this will
|
||||||
* wait indefinitely.
|
* wait indefinitely.
|
||||||
|
*
|
||||||
|
* Total timeout default none, inactivity timeout default 60 sec.
|
||||||
*/
|
*/
|
||||||
public boolean fetch(long fetchHeaderTimeout) {
|
public boolean fetch(long fetchHeaderTimeout) {
|
||||||
return fetch(fetchHeaderTimeout, -1, -1);
|
return fetch(fetchHeaderTimeout, -1, -1);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Blocking fetch.
|
||||||
|
*
|
||||||
|
* @param fetchHeaderTimeout <= 0 for none (proxy will timeout if none, none isn't recommended if no proxy)
|
||||||
|
* @param totalTimeout <= 0 for default none
|
||||||
|
* @param inactivityTimeout <= 0 for default 60 sec
|
||||||
|
*/
|
||||||
public boolean fetch(long fetchHeaderTimeout, long totalTimeout, long inactivityTimeout) {
|
public boolean fetch(long fetchHeaderTimeout, long totalTimeout, long inactivityTimeout) {
|
||||||
_fetchHeaderTimeout = fetchHeaderTimeout;
|
_fetchHeaderTimeout = fetchHeaderTimeout;
|
||||||
_fetchEndTime = (totalTimeout > 0 ? System.currentTimeMillis() + totalTimeout : -1);
|
_fetchEndTime = (totalTimeout > 0 ? System.currentTimeMillis() + totalTimeout : -1);
|
||||||
|
@ -9,12 +9,13 @@ import net.i2p.I2PAppContext;
|
|||||||
*/
|
*/
|
||||||
class Executor implements Runnable {
|
class Executor implements Runnable {
|
||||||
private final I2PAppContext _context;
|
private final I2PAppContext _context;
|
||||||
private Log _log;
|
private final Log _log;
|
||||||
private final List _readyEvents;
|
private final List<SimpleTimer.TimedEvent> _readyEvents;
|
||||||
private final SimpleStore runn;
|
private final SimpleStore runn;
|
||||||
|
|
||||||
public Executor(I2PAppContext ctx, Log log, List events, SimpleStore x) {
|
public Executor(I2PAppContext ctx, Log log, List<SimpleTimer.TimedEvent> events, SimpleStore x) {
|
||||||
_context = ctx;
|
_context = ctx;
|
||||||
|
_log = log;
|
||||||
_readyEvents = events;
|
_readyEvents = events;
|
||||||
runn = x;
|
runn = x;
|
||||||
}
|
}
|
||||||
@ -26,7 +27,7 @@ class Executor implements Runnable {
|
|||||||
if (_readyEvents.isEmpty())
|
if (_readyEvents.isEmpty())
|
||||||
try { _readyEvents.wait(); } catch (InterruptedException ie) {}
|
try { _readyEvents.wait(); } catch (InterruptedException ie) {}
|
||||||
if (!_readyEvents.isEmpty())
|
if (!_readyEvents.isEmpty())
|
||||||
evt = (SimpleTimer.TimedEvent)_readyEvents.remove(0);
|
evt = _readyEvents.remove(0);
|
||||||
}
|
}
|
||||||
|
|
||||||
if (evt != null) {
|
if (evt != null) {
|
||||||
@ -34,21 +35,12 @@ class Executor implements Runnable {
|
|||||||
try {
|
try {
|
||||||
evt.timeReached();
|
evt.timeReached();
|
||||||
} catch (Throwable t) {
|
} catch (Throwable t) {
|
||||||
log("Executing task " + evt + " exited unexpectedly, please report", t);
|
_log.error("Executing task " + evt + " exited unexpectedly, please report", t);
|
||||||
}
|
}
|
||||||
long time = _context.clock().now() - before;
|
long time = _context.clock().now() - before;
|
||||||
// FIXME _log won't be non-null unless we already had a CRIT
|
if ( (time > 1000) && (_log.shouldLog(Log.WARN)) )
|
||||||
if ( (time > 1000) && (_log != null) && (_log.shouldLog(Log.WARN)) )
|
|
||||||
_log.warn("wtf, event execution took " + time + ": " + evt);
|
_log.warn("wtf, event execution took " + time + ": " + evt);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
private void log(String msg, Throwable t) {
|
|
||||||
synchronized (this) {
|
|
||||||
if (_log == null)
|
|
||||||
_log = I2PAppContext.getGlobalContext().logManager().getLog(SimpleTimer.class);
|
|
||||||
}
|
|
||||||
_log.log(Log.CRIT, msg, t);
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
66
history.txt
66
history.txt
@ -1,3 +1,69 @@
|
|||||||
|
2012-07-01 zzz
|
||||||
|
* i2psnark:
|
||||||
|
- Don't send a keepalive to a peer we are going to disconnect
|
||||||
|
- Disconnect peer when idle a long time
|
||||||
|
- PeerCheckerTask cleanup
|
||||||
|
- Static ref cleanup
|
||||||
|
- Don't show a downloaded torrent file as "seeding"
|
||||||
|
- Better torrent file download icon
|
||||||
|
|
||||||
|
2012-06-29 zzz
|
||||||
|
* HTTP Proxy: Change the error code for unknown host from 404 to 500
|
||||||
|
* SimpleTimer: Fix logging
|
||||||
|
* Streaming:
|
||||||
|
- Allow at least 3 packets and up to half the window to be active resends
|
||||||
|
instead of just 1, to reduce stall time after a packet drop
|
||||||
|
- Increase fast retransmit threshold back to 3 to reduce retransmissions
|
||||||
|
- Don't fast retransmit if we recently retransmitted it already
|
||||||
|
- Allow double the window as long as gaps are less than the window
|
||||||
|
- Don't set the MSS in a resent packet (saves 2 bytes)
|
||||||
|
- Remove redundant calls to updateAcks()
|
||||||
|
- Update activity timer when resending a packet
|
||||||
|
- Reset unackedPacketsReceived counter at all places where acks are sent
|
||||||
|
so it isn't wrong
|
||||||
|
- Fix some places where the activeResends count could become wrong
|
||||||
|
- Prevent storm of CLOSE packets
|
||||||
|
- Never resend the whole packet in ackImmediately(), just send an ack
|
||||||
|
- Cancel flusher timer in MessageOutputStream when closed
|
||||||
|
- Move some createRateStats to ConnectionManager to reduce repeated calls
|
||||||
|
- Cleanups, javadocs, logging, volatile, finals
|
||||||
|
* Update: Increase eepget timeouts
|
||||||
|
|
||||||
|
2012-06-24 zzz
|
||||||
|
* ElGamalAESEngine: Fix bad size estimate when tags are included,
|
||||||
|
resulting in trailing zeros after the padding
|
||||||
|
in the unencrypted data
|
||||||
|
* i2psnark: Don't create a new PeerCoordinator after restart, as the
|
||||||
|
TrackerClient holds on to the old one and that causes it
|
||||||
|
to not get peers. Possibly fixes ticket #563.
|
||||||
|
* I2PTunnel: Fix NPE on shared client creation, thx kytv
|
||||||
|
* Transport: Add Ethiopia to hidden mode list
|
||||||
|
|
||||||
|
2012-06-21 zzz
|
||||||
|
* I2CP: Make separate message ID counters per-destination, use atomic,
|
||||||
|
increase max (could have caused "local loopback" problems)
|
||||||
|
* IRC Client: Don't flush output unless out of input, so the
|
||||||
|
streaming messages don't get split up unnecessarily
|
||||||
|
* OCMOSJ, ElG, Streaming: log tweaks
|
||||||
|
* TunnelInfo: Change msg counter from long to int
|
||||||
|
* TunnelPeerSelectors: Minor refactoring to store context
|
||||||
|
* TunnelPool: Fix bug where a tunnel was marked as reused when it wasn't
|
||||||
|
* TunnelPoolManager: Use one ClientPeerSelector for all pools
|
||||||
|
|
||||||
|
2012-06-20 zzz
|
||||||
|
* I2PSession:
|
||||||
|
- Greatly simplify the VerifyUsage timers
|
||||||
|
- Constructor cleanup
|
||||||
|
|
||||||
|
2012-06-19 zzz
|
||||||
|
* i2psnark:
|
||||||
|
- Hide buttons while stopping all
|
||||||
|
* Socks: Pass remote port through
|
||||||
|
* Streaming:
|
||||||
|
- Listen only on local port if set
|
||||||
|
- Listen only for streaming protocol if configured (new option)
|
||||||
|
- Javadocs re: ports
|
||||||
|
|
||||||
2012-06-18 zzz
|
2012-06-18 zzz
|
||||||
* i2psnark:
|
* i2psnark:
|
||||||
- Improve torrent shutdown handling to maximize chance of
|
- Improve torrent shutdown handling to maximize chance of
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
HTTP/1.1 404 Domain Not Found
|
HTTP/1.1 500 Domain Not Found
|
||||||
Content-Type: text/html; charset=UTF-8
|
Content-Type: text/html; charset=UTF-8
|
||||||
Cache-control: no-cache
|
Cache-control: no-cache
|
||||||
Connection: close
|
Connection: close
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
HTTP/1.1 404 Domain Not Found
|
HTTP/1.1 500 Domain Not Found
|
||||||
Content-Type: text/html; charset=UTF-8
|
Content-Type: text/html; charset=UTF-8
|
||||||
Cache-control: no-cache
|
Cache-control: no-cache
|
||||||
Connection: close
|
Connection: close
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
HTTP/1.1 404 Domaine non trouvé
|
HTTP/1.1 500 Domaine non trouvé
|
||||||
Content-Type: text/html; charset=UTF-8
|
Content-Type: text/html; charset=UTF-8
|
||||||
Cache-control: no-cache
|
Cache-control: no-cache
|
||||||
Connection: close
|
Connection: close
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
HTTP/1.1 404 Domain Not Found
|
HTTP/1.1 500 Domain Not Found
|
||||||
Content-Type: text/html; charset=UTF-8
|
Content-Type: text/html; charset=UTF-8
|
||||||
Cache-control: no-cache
|
Cache-control: no-cache
|
||||||
Connection: close
|
Connection: close
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
HTTP/1.1 404 Domain Not Found
|
HTTP/1.1 500 Domain Not Found
|
||||||
Content-Type: text/html; charset=UTF-8
|
Content-Type: text/html; charset=UTF-8
|
||||||
Cache-control: no-cache
|
Cache-control: no-cache
|
||||||
Connection: close
|
Connection: close
|
||||||
|
@ -1,4 +1,4 @@
|
|||||||
HTTP/1.1 404 Domain Not Found
|
HTTP/1.1 500 Domain Not Found
|
||||||
Content-Type: text/html; charset=UTF-8
|
Content-Type: text/html; charset=UTF-8
|
||||||
Cache-control: no-cache
|
Cache-control: no-cache
|
||||||
Connection: close
|
Connection: close
|
||||||
|
@ -83,7 +83,7 @@ public class GarlicMessage extends FastI2NPMessageImpl {
|
|||||||
public String toString() {
|
public String toString() {
|
||||||
StringBuilder buf = new StringBuilder();
|
StringBuilder buf = new StringBuilder();
|
||||||
buf.append("[GarlicMessage: ");
|
buf.append("[GarlicMessage: ");
|
||||||
buf.append("\n\tData length: ").append(getData().length).append(" bytes");
|
buf.append("Data length: ").append(getData().length).append(" bytes");
|
||||||
buf.append("]");
|
buf.append("]");
|
||||||
return buf.toString();
|
return buf.toString();
|
||||||
}
|
}
|
||||||
|
@ -18,7 +18,7 @@ public class RouterVersion {
|
|||||||
/** deprecated */
|
/** deprecated */
|
||||||
public final static String ID = "Monotone";
|
public final static String ID = "Monotone";
|
||||||
public final static String VERSION = CoreVersion.VERSION;
|
public final static String VERSION = CoreVersion.VERSION;
|
||||||
public final static long BUILD = 16;
|
public final static long BUILD = 21;
|
||||||
|
|
||||||
/** for example "-test" */
|
/** for example "-test" */
|
||||||
public final static String EXTRA = "";
|
public final static String EXTRA = "";
|
||||||
|
@ -68,10 +68,11 @@ public interface TunnelInfo {
|
|||||||
*/
|
*/
|
||||||
public void testSuccessful(int responseTime);
|
public void testSuccessful(int responseTime);
|
||||||
|
|
||||||
public long getProcessedMessagesCount();
|
public int getProcessedMessagesCount();
|
||||||
|
|
||||||
/** we know for sure that this many bytes travelled through the tunnel in its lifetime */
|
/** we know for sure that this many bytes travelled through the tunnel in its lifetime */
|
||||||
public long getVerifiedBytesTransferred();
|
public long getVerifiedBytesTransferred();
|
||||||
|
|
||||||
/** we know for sure that the given number of bytes were sent down the tunnel fully */
|
/** we know for sure that the given number of bytes were sent down the tunnel fully */
|
||||||
public void incrementVerifiedBytesTransferred(int numBytes);
|
public void incrementVerifiedBytesTransferred(int numBytes);
|
||||||
|
|
||||||
|
@ -18,6 +18,7 @@ import java.util.List;
|
|||||||
import java.util.Locale;
|
import java.util.Locale;
|
||||||
import java.util.Map;
|
import java.util.Map;
|
||||||
import java.util.Set;
|
import java.util.Set;
|
||||||
|
import java.util.concurrent.atomic.AtomicInteger;
|
||||||
|
|
||||||
import net.i2p.client.I2PClient;
|
import net.i2p.client.I2PClient;
|
||||||
import net.i2p.crypto.SessionKeyManager;
|
import net.i2p.crypto.SessionKeyManager;
|
||||||
@ -86,7 +87,14 @@ class ClientConnectionRunner {
|
|||||||
private boolean _dead;
|
private boolean _dead;
|
||||||
/** For outbound traffic. true if i2cp.messageReliability = "none"; @since 0.8.1 */
|
/** For outbound traffic. true if i2cp.messageReliability = "none"; @since 0.8.1 */
|
||||||
private boolean _dontSendMSM;
|
private boolean _dontSendMSM;
|
||||||
|
private final AtomicInteger _messageId; // messageId counter
|
||||||
|
|
||||||
|
// Was 32767 since the beginning (04-2004).
|
||||||
|
// But it's 4 bytes in the I2CP spec and stored as a long in MessageID....
|
||||||
|
// If this is too low and wraps around, I2CP VerifyUsage could delete the wrong message,
|
||||||
|
// e.g. on local access
|
||||||
|
private static final int MAX_MESSAGE_ID = 0x4000000;
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new runner against the given socket
|
* Create a new runner against the given socket
|
||||||
*
|
*
|
||||||
@ -99,6 +107,7 @@ class ClientConnectionRunner {
|
|||||||
_messages = new ConcurrentHashMap();
|
_messages = new ConcurrentHashMap();
|
||||||
_alreadyProcessed = new ArrayList();
|
_alreadyProcessed = new ArrayList();
|
||||||
_acceptedPending = new ConcurrentHashSet();
|
_acceptedPending = new ConcurrentHashSet();
|
||||||
|
_messageId = new AtomicInteger(_context.random().nextInt());
|
||||||
}
|
}
|
||||||
|
|
||||||
private static volatile int __id = 0;
|
private static volatile int __id = 0;
|
||||||
@ -520,18 +529,9 @@ class ClientConnectionRunner {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// this *should* be mod 65536, but UnsignedInteger is still b0rked. FIXME
|
public int getNextMessageId() {
|
||||||
private final static int MAX_MESSAGE_ID = 32767;
|
// Don't % so we don't get negative IDs
|
||||||
private static volatile int _messageId = RandomSource.getInstance().nextInt(MAX_MESSAGE_ID); // messageId counter
|
return _messageId.incrementAndGet() & (MAX_MESSAGE_ID - 1);
|
||||||
private final static Object _messageIdLock = new Object();
|
|
||||||
|
|
||||||
static int getNextMessageId() {
|
|
||||||
synchronized (_messageIdLock) {
|
|
||||||
int messageId = (++_messageId)%MAX_MESSAGE_ID;
|
|
||||||
if (_messageId >= MAX_MESSAGE_ID)
|
|
||||||
_messageId = 0;
|
|
||||||
return messageId;
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
|
@ -36,7 +36,7 @@ class MessageReceivedJob extends JobImpl {
|
|||||||
public void runJob() {
|
public void runJob() {
|
||||||
if (_runner.isDead()) return;
|
if (_runner.isDead()) return;
|
||||||
MessageId id = new MessageId();
|
MessageId id = new MessageId();
|
||||||
id.setMessageId(ClientConnectionRunner.getNextMessageId());
|
id.setMessageId(_runner.getNextMessageId());
|
||||||
_runner.setPayload(id, _payload);
|
_runner.setPayload(id, _payload);
|
||||||
messageAvailable(id, _payload.getSize());
|
messageAvailable(id, _payload.getSize());
|
||||||
}
|
}
|
||||||
|
@ -233,8 +233,8 @@ public class GarlicMessageBuilder {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (log.shouldLog(Log.DEBUG))
|
if (log.shouldLog(Log.DEBUG))
|
||||||
log.debug("CloveSet size for message " + msg.getUniqueId() + " is " + cloveSet.length
|
log.debug("CloveSet (" + config.getCloveCount() + " cloves) for message " + msg.getUniqueId() + " is " + cloveSet.length
|
||||||
+ " and encrypted message data is " + encData.length);
|
+ " bytes and encrypted message data is " + encData.length + " bytes");
|
||||||
|
|
||||||
return msg;
|
return msg;
|
||||||
}
|
}
|
||||||
@ -268,7 +268,7 @@ public class GarlicMessageBuilder {
|
|||||||
for (int i = 0; i < config.getCloveCount(); i++) {
|
for (int i = 0; i < config.getCloveCount(); i++) {
|
||||||
GarlicConfig c = config.getClove(i);
|
GarlicConfig c = config.getClove(i);
|
||||||
if (c instanceof PayloadGarlicConfig) {
|
if (c instanceof PayloadGarlicConfig) {
|
||||||
log.debug("Subclove IS a payload garlic clove");
|
//log.debug("Subclove IS a payload garlic clove");
|
||||||
cloves[i] = buildClove(ctx, (PayloadGarlicConfig)c);
|
cloves[i] = buildClove(ctx, (PayloadGarlicConfig)c);
|
||||||
} else {
|
} else {
|
||||||
log.debug("Subclove IS NOT a payload garlic clove");
|
log.debug("Subclove IS NOT a payload garlic clove");
|
||||||
|
@ -71,13 +71,13 @@ class GarlicMessageParser {
|
|||||||
if (_log.shouldLog(Log.DEBUG))
|
if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug("# cloves to read: " + numCloves);
|
_log.debug("# cloves to read: " + numCloves);
|
||||||
for (int i = 0; i < numCloves; i++) {
|
for (int i = 0; i < numCloves; i++) {
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
//if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug("Reading clove " + i);
|
// _log.debug("Reading clove " + i);
|
||||||
GarlicClove clove = new GarlicClove(_context);
|
GarlicClove clove = new GarlicClove(_context);
|
||||||
offset += clove.readBytes(data, offset);
|
offset += clove.readBytes(data, offset);
|
||||||
set.addClove(clove);
|
set.addClove(clove);
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
//if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug("After reading clove " + i);
|
// _log.debug("After reading clove " + i);
|
||||||
}
|
}
|
||||||
//Certificate cert = new Certificate();
|
//Certificate cert = new Certificate();
|
||||||
//offset += cert.readBytes(data, offset);
|
//offset += cert.readBytes(data, offset);
|
||||||
|
@ -88,7 +88,7 @@ class OutboundClientMessageJobHelper {
|
|||||||
PayloadGarlicConfig dataClove, Hash from, Destination dest, TunnelInfo replyTunnel, boolean requireAck,
|
PayloadGarlicConfig dataClove, Hash from, Destination dest, TunnelInfo replyTunnel, boolean requireAck,
|
||||||
LeaseSet bundledReplyLeaseSet) {
|
LeaseSet bundledReplyLeaseSet) {
|
||||||
Log log = ctx.logManager().getLog(OutboundClientMessageJobHelper.class);
|
Log log = ctx.logManager().getLog(OutboundClientMessageJobHelper.class);
|
||||||
if (log.shouldLog(Log.DEBUG))
|
if (replyToken >= 0 && log.shouldLog(Log.DEBUG))
|
||||||
log.debug("Reply token: " + replyToken);
|
log.debug("Reply token: " + replyToken);
|
||||||
GarlicConfig config = new GarlicConfig();
|
GarlicConfig config = new GarlicConfig();
|
||||||
|
|
||||||
@ -136,20 +136,17 @@ class OutboundClientMessageJobHelper {
|
|||||||
Log log = ctx.logManager().getLog(OutboundClientMessageJobHelper.class);
|
Log log = ctx.logManager().getLog(OutboundClientMessageJobHelper.class);
|
||||||
PayloadGarlicConfig ackClove = new PayloadGarlicConfig();
|
PayloadGarlicConfig ackClove = new PayloadGarlicConfig();
|
||||||
|
|
||||||
Hash replyToTunnelRouter = null; // inbound tunnel gateway
|
|
||||||
TunnelId replyToTunnelId = null; // tunnel id on that gateway
|
|
||||||
|
|
||||||
if (replyToTunnel == null) {
|
if (replyToTunnel == null) {
|
||||||
if (log.shouldLog(Log.WARN))
|
if (log.shouldLog(Log.WARN))
|
||||||
log.warn("Unable to send client message from " + from.toBase64()
|
log.warn("Unable to send client message from " + from.toBase64()
|
||||||
+ ", as there are no inbound tunnels available");
|
+ ", as there are no inbound tunnels available");
|
||||||
return null;
|
return null;
|
||||||
}
|
}
|
||||||
replyToTunnelId = replyToTunnel.getReceiveTunnelId(0);
|
TunnelId replyToTunnelId = replyToTunnel.getReceiveTunnelId(0); // tunnel id on that gateway
|
||||||
replyToTunnelRouter = replyToTunnel.getPeer(0);
|
Hash replyToTunnelRouter = replyToTunnel.getPeer(0); // inbound tunnel gateway
|
||||||
if (log.shouldLog(Log.DEBUG))
|
if (log.shouldLog(Log.DEBUG))
|
||||||
log.debug("Ack for the data message will come back along tunnel " + replyToTunnelId
|
log.debug("Ack for the data message will come back along tunnel " + replyToTunnelId
|
||||||
+ ":\n" + replyToTunnel);
|
+ ": " + replyToTunnel);
|
||||||
|
|
||||||
DeliveryInstructions ackInstructions = new DeliveryInstructions();
|
DeliveryInstructions ackInstructions = new DeliveryInstructions();
|
||||||
ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL);
|
ackInstructions.setDeliveryMode(DeliveryInstructions.DELIVERY_MODE_TUNNEL);
|
||||||
@ -163,8 +160,8 @@ class OutboundClientMessageJobHelper {
|
|||||||
DeliveryStatusMessage msg = new DeliveryStatusMessage(ctx);
|
DeliveryStatusMessage msg = new DeliveryStatusMessage(ctx);
|
||||||
msg.setArrival(ctx.clock().now());
|
msg.setArrival(ctx.clock().now());
|
||||||
msg.setMessageId(replyToken);
|
msg.setMessageId(replyToken);
|
||||||
if (log.shouldLog(Log.DEBUG))
|
//if (log.shouldLog(Log.DEBUG))
|
||||||
log.debug("Delivery status message key: " + replyToken + " arrival: " + msg.getArrival());
|
// log.debug("Delivery status message key: " + replyToken + " arrival: " + msg.getArrival());
|
||||||
|
|
||||||
ackClove.setCertificate(Certificate.NULL_CERT);
|
ackClove.setCertificate(Certificate.NULL_CERT);
|
||||||
ackClove.setDeliveryInstructions(ackInstructions);
|
ackClove.setDeliveryInstructions(ackInstructions);
|
||||||
@ -175,11 +172,11 @@ class OutboundClientMessageJobHelper {
|
|||||||
// defaults
|
// defaults
|
||||||
//ackClove.setRequestAck(false);
|
//ackClove.setRequestAck(false);
|
||||||
|
|
||||||
if (log.shouldLog(Log.DEBUG))
|
//if (log.shouldLog(Log.DEBUG))
|
||||||
log.debug("Delivery status message is targetting us ["
|
// log.debug("Delivery status message is targetting us ["
|
||||||
+ ackClove.getRecipient().getIdentity().getHash().toBase64()
|
// + ackClove.getRecipient().getIdentity().getHash().toBase64()
|
||||||
+ "] via tunnel " + replyToTunnelId.getTunnelId() + " on "
|
// + "] via tunnel " + replyToTunnelId.getTunnelId() + " on "
|
||||||
+ replyToTunnelRouter.toBase64());
|
// + replyToTunnelRouter.toBase64());
|
||||||
|
|
||||||
return ackClove;
|
return ackClove;
|
||||||
}
|
}
|
||||||
|
@ -150,8 +150,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
overallExpiration = timeoutMs + _start;
|
overallExpiration = timeoutMs + _start;
|
||||||
if (_log.shouldLog(Log.INFO))
|
if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.info(getJobId() + " Default Expiration (ms): " + timeoutMs);
|
_log.debug(getJobId() + " Default Expiration (ms): " + timeoutMs);
|
||||||
}
|
}
|
||||||
_overallExpiration = overallExpiration;
|
_overallExpiration = overallExpiration;
|
||||||
}
|
}
|
||||||
@ -182,9 +182,9 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
|||||||
dieFatal();
|
dieFatal();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
//if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug(getJobId() + ": Send outbound client message job beginning" +
|
// _log.debug(getJobId() + ": Send outbound client message job beginning" +
|
||||||
": preparing to search for the leaseSet for " + _toString);
|
// ": preparing to search for the leaseSet for " + _toString);
|
||||||
long timeoutMs = _overallExpiration - now;
|
long timeoutMs = _overallExpiration - now;
|
||||||
Hash key = _to.calculateHash();
|
Hash key = _to.calculateHash();
|
||||||
SendJob success = new SendJob(getContext());
|
SendJob success = new SendJob(getContext());
|
||||||
@ -474,8 +474,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
|||||||
dieFatal();
|
dieFatal();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
//if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug(getJobId() + ": Clove built to " + _toString);
|
// _log.debug(getJobId() + ": Clove built to " + _toString);
|
||||||
long msgExpiration = _overallExpiration; // getContext().clock().now() + OVERALL_TIMEOUT_MS_DEFAULT;
|
long msgExpiration = _overallExpiration; // getContext().clock().now() + OVERALL_TIMEOUT_MS_DEFAULT;
|
||||||
GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(getContext(), token,
|
GarlicMessage msg = OutboundClientMessageJobHelper.createGarlicMessage(getContext(), token,
|
||||||
msgExpiration, key,
|
msgExpiration, key,
|
||||||
@ -494,8 +494,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
//if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug(getJobId() + ": send() - token expected " + token + " to " + _toString);
|
// _log.debug(getJobId() + ": send() - token expected " + token + " to " + _toString);
|
||||||
|
|
||||||
SendSuccessJob onReply = null;
|
SendSuccessJob onReply = null;
|
||||||
SendTimeoutJob onFail = null;
|
SendTimeoutJob onFail = null;
|
||||||
@ -515,14 +515,14 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug(getJobId() + ": Placing GarlicMessage into the new tunnel message bound for "
|
_log.debug(getJobId() + ": GarlicMessage in new tunnel msg for "
|
||||||
+ _toString + " at "
|
+ _toString + " at "
|
||||||
+ _lease.getTunnelId() + " on "
|
+ _lease.getTunnelId() + " on "
|
||||||
+ _lease.getGateway());
|
+ _lease.getGateway());
|
||||||
|
|
||||||
if (_outTunnel != null) {
|
if (_outTunnel != null) {
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug(getJobId() + ": Sending tunnel message out " + _outTunnel.getSendTunnelId(0) + " to "
|
_log.debug(getJobId() + ": Sending msg out " + _outTunnel.getSendTunnelId(0) + " to "
|
||||||
+ _toString + " at "
|
+ _toString + " at "
|
||||||
+ _lease.getTunnelId() + " on "
|
+ _lease.getTunnelId() + " on "
|
||||||
+ _lease.getGateway());
|
+ _lease.getGateway());
|
||||||
@ -571,9 +571,9 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
|||||||
long before = getContext().clock().now();
|
long before = getContext().clock().now();
|
||||||
getContext().tunnelDispatcher().dispatchOutbound(_msg, _outTunnel.getSendTunnelId(0), _lease.getTunnelId(), _lease.getGateway());
|
getContext().tunnelDispatcher().dispatchOutbound(_msg, _outTunnel.getSendTunnelId(0), _lease.getTunnelId(), _lease.getGateway());
|
||||||
long dispatchSendTime = getContext().clock().now() - before;
|
long dispatchSendTime = getContext().clock().now() - before;
|
||||||
if (_log.shouldLog(Log.INFO))
|
//if (_log.shouldLog(Log.INFO))
|
||||||
_log.info(OutboundClientMessageOneShotJob.this.getJobId() +
|
// _log.info(OutboundClientMessageOneShotJob.this.getJobId() +
|
||||||
": Dispatching message to " + _toString + " complete");
|
// ": Dispatching message to " + _toString + " complete");
|
||||||
getContext().statManager().addRateData("client.dispatchTime", getContext().clock().now() - _start, 0);
|
getContext().statManager().addRateData("client.dispatchTime", getContext().clock().now() - _start, 0);
|
||||||
getContext().statManager().addRateData("client.dispatchSendTime", dispatchSendTime, 0);
|
getContext().statManager().addRateData("client.dispatchSendTime", dispatchSendTime, 0);
|
||||||
}
|
}
|
||||||
@ -728,8 +728,8 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
|||||||
_clove = clove;
|
_clove = clove;
|
||||||
_cloveId = _clove.getId();
|
_cloveId = _clove.getId();
|
||||||
|
|
||||||
if (_log.shouldLog(Log.DEBUG))
|
//if (_log.shouldLog(Log.DEBUG))
|
||||||
_log.debug(getJobId() + ": Built payload clove with id " + clove.getId());
|
// _log.debug(getJobId() + ": Built payload clove with id " + clove.getId());
|
||||||
return true;
|
return true;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -858,9 +858,9 @@ public class OutboundClientMessageOneShotJob extends JobImpl {
|
|||||||
public String getName() { return "Outbound client message send timeout"; }
|
public String getName() { return "Outbound client message send timeout"; }
|
||||||
|
|
||||||
public void runJob() {
|
public void runJob() {
|
||||||
if (_log.shouldLog(Log.INFO))
|
//if (_log.shouldLog(Log.INFO))
|
||||||
_log.info(OutboundClientMessageOneShotJob.this.getJobId()
|
// _log.info(OutboundClientMessageOneShotJob.this.getJobId()
|
||||||
+ ": Soft timeout through the lease " + _lease);
|
// + ": Soft timeout through the lease " + _lease);
|
||||||
|
|
||||||
// unused
|
// unused
|
||||||
//_lease.setNumFailure(_lease.getNumFailure()+1);
|
//_lease.setNumFailure(_lease.getNumFailure()+1);
|
||||||
|
@ -21,6 +21,13 @@ import net.i2p.router.ReplyJob;
|
|||||||
import net.i2p.router.RouterContext;
|
import net.i2p.router.RouterContext;
|
||||||
import net.i2p.util.Log;
|
import net.i2p.util.Log;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Send a message directly to another router, i.e. not through a tunnel.
|
||||||
|
* This is safe to run inline via runJob().
|
||||||
|
* If the RouterInfo for the Hash is not found locally, it will
|
||||||
|
* queue a lookup and register itself to be run again when the lookup
|
||||||
|
* succeeds or times out.
|
||||||
|
*/
|
||||||
public class SendMessageDirectJob extends JobImpl {
|
public class SendMessageDirectJob extends JobImpl {
|
||||||
private final Log _log;
|
private final Log _log;
|
||||||
private final I2NPMessage _message;
|
private final I2NPMessage _message;
|
||||||
@ -39,9 +46,11 @@ public class SendMessageDirectJob extends JobImpl {
|
|||||||
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, int timeoutMs, int priority) {
|
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, int timeoutMs, int priority) {
|
||||||
this(ctx, message, toPeer, null, null, null, null, timeoutMs, priority);
|
this(ctx, message, toPeer, null, null, null, null, timeoutMs, priority);
|
||||||
}
|
}
|
||||||
|
|
||||||
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, ReplyJob onSuccess, Job onFail, MessageSelector selector, int timeoutMs, int priority) {
|
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, ReplyJob onSuccess, Job onFail, MessageSelector selector, int timeoutMs, int priority) {
|
||||||
this(ctx, message, toPeer, null, onSuccess, onFail, selector, timeoutMs, priority);
|
this(ctx, message, toPeer, null, onSuccess, onFail, selector, timeoutMs, priority);
|
||||||
}
|
}
|
||||||
|
|
||||||
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, Job onSend, ReplyJob onSuccess, Job onFail, MessageSelector selector, int timeoutMs, int priority) {
|
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, Job onSend, ReplyJob onSuccess, Job onFail, MessageSelector selector, int timeoutMs, int priority) {
|
||||||
super(ctx);
|
super(ctx);
|
||||||
_log = getContext().logManager().getLog(SendMessageDirectJob.class);
|
_log = getContext().logManager().getLog(SendMessageDirectJob.class);
|
||||||
@ -66,6 +75,7 @@ public class SendMessageDirectJob extends JobImpl {
|
|||||||
}
|
}
|
||||||
|
|
||||||
public String getName() { return "Send Message Direct"; }
|
public String getName() { return "Send Message Direct"; }
|
||||||
|
|
||||||
public void runJob() {
|
public void runJob() {
|
||||||
long now = getContext().clock().now();
|
long now = getContext().clock().now();
|
||||||
|
|
||||||
|
@ -16,7 +16,7 @@ abstract class BadCountries {
|
|||||||
// zzz.i2p/topics/969
|
// zzz.i2p/topics/969
|
||||||
// List created based on the Press Freedom Index. Those countries with a score of higher than 50 are included:
|
// List created based on the Press Freedom Index. Those countries with a score of higher than 50 are included:
|
||||||
// http://en.wikipedia.org/wiki/Press_Freedom_Index
|
// http://en.wikipedia.org/wiki/Press_Freedom_Index
|
||||||
// Except:
|
// Except (quote):
|
||||||
// I don't really think that is usage of I2P is dangerous in countries from CIS
|
// I don't really think that is usage of I2P is dangerous in countries from CIS
|
||||||
// General situation is really bad (like in Russia) but people here doesn't have problems with Ecnryption usage.
|
// General situation is really bad (like in Russia) but people here doesn't have problems with Ecnryption usage.
|
||||||
|
|
||||||
@ -32,6 +32,7 @@ abstract class BadCountries {
|
|||||||
/* Democratic Republic of the Congo */ "CD",
|
/* Democratic Republic of the Congo */ "CD",
|
||||||
/* Equatorial Guinea */ "GQ",
|
/* Equatorial Guinea */ "GQ",
|
||||||
/* Eritrea */ "ER",
|
/* Eritrea */ "ER",
|
||||||
|
/* Ethiopia */ "ET",
|
||||||
/* Fiji */ "FJ",
|
/* Fiji */ "FJ",
|
||||||
/* Honduras */ "HN",
|
/* Honduras */ "HN",
|
||||||
/* Iran */ "IR",
|
/* Iran */ "IR",
|
||||||
|
@ -29,7 +29,7 @@ public class TunnelCreatorConfig implements TunnelInfo {
|
|||||||
private List<Integer> _order;
|
private List<Integer> _order;
|
||||||
private long _replyMessageId;
|
private long _replyMessageId;
|
||||||
private final boolean _isInbound;
|
private final boolean _isInbound;
|
||||||
private long _messagesProcessed;
|
private int _messagesProcessed;
|
||||||
private volatile long _verifiedBytesTransferred;
|
private volatile long _verifiedBytesTransferred;
|
||||||
private boolean _failed;
|
private boolean _failed;
|
||||||
private int _failures;
|
private int _failures;
|
||||||
@ -127,7 +127,7 @@ public class TunnelCreatorConfig implements TunnelInfo {
|
|||||||
|
|
||||||
/** take note of a message being pumped through this tunnel */
|
/** take note of a message being pumped through this tunnel */
|
||||||
public void incrementProcessedMessages() { _messagesProcessed++; }
|
public void incrementProcessedMessages() { _messagesProcessed++; }
|
||||||
public long getProcessedMessagesCount() { return _messagesProcessed; }
|
public int getProcessedMessagesCount() { return _messagesProcessed; }
|
||||||
|
|
||||||
public void incrementVerifiedBytesTransferred(int bytes) {
|
public void incrementVerifiedBytesTransferred(int bytes) {
|
||||||
_verifiedBytesTransferred += bytes;
|
_verifiedBytesTransferred += bytes;
|
||||||
@ -144,6 +144,7 @@ public class TunnelCreatorConfig implements TunnelInfo {
|
|||||||
_context.profileManager().tunnelDataPushed1m(_peers[i], (int)normalized);
|
_context.profileManager().tunnelDataPushed1m(_peers[i], (int)normalized);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
public long getVerifiedBytesTransferred() { return _verifiedBytesTransferred; }
|
public long getVerifiedBytesTransferred() { return _verifiedBytesTransferred; }
|
||||||
|
|
||||||
private static final int THROUGHPUT_COUNT = 3;
|
private static final int THROUGHPUT_COUNT = 3;
|
||||||
|
@ -15,8 +15,13 @@ import net.i2p.router.TunnelPoolSettings;
|
|||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
class ClientPeerSelector extends TunnelPeerSelector {
|
class ClientPeerSelector extends TunnelPeerSelector {
|
||||||
public List<Hash> selectPeers(RouterContext ctx, TunnelPoolSettings settings) {
|
|
||||||
int length = getLength(ctx, settings);
|
public ClientPeerSelector(RouterContext context) {
|
||||||
|
super(context);
|
||||||
|
}
|
||||||
|
|
||||||
|
public List<Hash> selectPeers(TunnelPoolSettings settings) {
|
||||||
|
int length = getLength(settings);
|
||||||
if (length < 0)
|
if (length < 0)
|
||||||
return null;
|
return null;
|
||||||
if ( (length == 0) && (settings.getLength()+settings.getLengthVariance() > 0) )
|
if ( (length == 0) && (settings.getLength()+settings.getLengthVariance() > 0) )
|
||||||
@ -26,9 +31,9 @@ class ClientPeerSelector extends TunnelPeerSelector {
|
|||||||
|
|
||||||
if (length > 0) {
|
if (length > 0) {
|
||||||
if (shouldSelectExplicit(settings))
|
if (shouldSelectExplicit(settings))
|
||||||
return selectExplicit(ctx, settings, length);
|
return selectExplicit(settings, length);
|
||||||
|
|
||||||
Set<Hash> exclude = getExclude(ctx, settings.isInbound(), settings.isExploratory());
|
Set<Hash> exclude = getExclude(settings.isInbound(), false);
|
||||||
Set<Hash> matches = new HashSet(length);
|
Set<Hash> matches = new HashSet(length);
|
||||||
if (length == 1) {
|
if (length == 1) {
|
||||||
ctx.profileOrganizer().selectFastPeers(length, exclude, matches, 0);
|
ctx.profileOrganizer().selectFastPeers(length, exclude, matches, 0);
|
||||||
@ -41,6 +46,9 @@ class ClientPeerSelector extends TunnelPeerSelector {
|
|||||||
rv = new ArrayList(length + 1);
|
rv = new ArrayList(length + 1);
|
||||||
// OBEP or IB last hop
|
// OBEP or IB last hop
|
||||||
// group 0 or 1 if two hops, otherwise group 0
|
// group 0 or 1 if two hops, otherwise group 0
|
||||||
|
if (!settings.isInbound()) {
|
||||||
|
// exclude existing OBEPs to get some diversity
|
||||||
|
}
|
||||||
ctx.profileOrganizer().selectFastPeers(1, exclude, matches, settings.getRandomKey(), length == 2 ? 2 : 4);
|
ctx.profileOrganizer().selectFastPeers(1, exclude, matches, settings.getRandomKey(), length == 2 ? 2 : 4);
|
||||||
matches.remove(ctx.routerHash());
|
matches.remove(ctx.routerHash());
|
||||||
exclude.addAll(matches);
|
exclude.addAll(matches);
|
||||||
@ -64,6 +72,9 @@ class ClientPeerSelector extends TunnelPeerSelector {
|
|||||||
}
|
}
|
||||||
// IBGW or OB first hop
|
// IBGW or OB first hop
|
||||||
// group 2 or 3 if two hops, otherwise group 1
|
// group 2 or 3 if two hops, otherwise group 1
|
||||||
|
if (settings.isInbound()) {
|
||||||
|
// exclude existing IBGWs to get some diversity
|
||||||
|
}
|
||||||
ctx.profileOrganizer().selectFastPeers(1, exclude, matches, settings.getRandomKey(), length == 2 ? 3 : 5);
|
ctx.profileOrganizer().selectFastPeers(1, exclude, matches, settings.getRandomKey(), length == 2 ? 3 : 5);
|
||||||
matches.remove(ctx.routerHash());
|
matches.remove(ctx.routerHash());
|
||||||
rv.addAll(matches);
|
rv.addAll(matches);
|
||||||
|
@@ -18,9 +18,14 @@ import net.i2p.util.Log;
  *
  */
 class ExploratoryPeerSelector extends TunnelPeerSelector {
-    public List<Hash> selectPeers(RouterContext ctx, TunnelPoolSettings settings) {
+
+    public ExploratoryPeerSelector(RouterContext context) {
+        super(context);
+    }
+
+    public List<Hash> selectPeers(TunnelPoolSettings settings) {
         Log l = ctx.logManager().getLog(getClass());
-        int length = getLength(ctx, settings);
+        int length = getLength(settings);
         if (length < 0) {
             if (l.shouldLog(Log.DEBUG))
                 l.debug("Length requested is zero: " + settings);
@@ -28,13 +33,13 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
         }
 
         if (false && shouldSelectExplicit(settings)) {
-            List rv = selectExplicit(ctx, settings, length);
+            List rv = selectExplicit(settings, length);
             if (l.shouldLog(Log.DEBUG))
                 l.debug("Explicit peers selected: " + rv);
             return rv;
         }
 
-        Set<Hash> exclude = getExclude(ctx, settings.isInbound(), settings.isExploratory());
+        Set<Hash> exclude = getExclude(settings.isInbound(), true);
         exclude.add(ctx.routerHash());
         // Don't use ff peers for exploratory tunnels to lessen exposure to netDb searches and stores
         // Hmm if they don't get explored they don't get a speed/capacity rating
@@ -42,7 +47,7 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
         // FloodfillNetworkDatabaseFacade fac = (FloodfillNetworkDatabaseFacade)ctx.netDb();
         // exclude.addAll(fac.getFloodfillPeers());
         HashSet matches = new HashSet(length);
-        boolean exploreHighCap = shouldPickHighCap(ctx);
+        boolean exploreHighCap = shouldPickHighCap();
 
         //
         // We don't honor IP Restriction here, to be fixed
@@ -84,7 +89,7 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
      * build success rate is much worse, return true so that reliability
      * is maintained.
      */
-    private static boolean shouldPickHighCap(RouterContext ctx) {
+    private boolean shouldPickHighCap() {
         if (ctx.getBooleanProperty("router.exploreHighCapacity"))
             return true;
 
@@ -118,7 +123,7 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
         if (ctx.router().getUptime() <= 11*60*1000) {
             failPct = 100 - MIN_NONFAILING_PCT;
         } else {
-            failPct = getExploratoryFailPercentage(ctx);
+            failPct = getExploratoryFailPercentage();
             //Log l = ctx.logManager().getLog(getClass());
             //if (l.shouldLog(Log.DEBUG))
             //    l.debug("Normalized Fail pct: " + failPct);
@@ -140,9 +145,9 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
      * Even this isn't the "true" rate for the NonFailingPeers pool, since we
      * are often building exploratory tunnels using the HighCapacity pool.
      */
-    private static int getExploratoryFailPercentage(RouterContext ctx) {
-        int c = getFailPercentage(ctx, "Client");
-        int e = getFailPercentage(ctx, "Exploratory");
+    private int getExploratoryFailPercentage() {
+        int c = getFailPercentage("Client");
+        int e = getFailPercentage("Exploratory");
         //Log l = ctx.logManager().getLog(getClass());
         //if (l.shouldLog(Log.DEBUG))
         //    l.debug("Client, Expl. Fail pct: " + c + ", " + e);
@@ -154,11 +159,11 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
         return (100 * (e-c)) / (100-c);
     }
 
-    private static int getFailPercentage(RouterContext ctx, String t) {
+    private int getFailPercentage(String t) {
         String pfx = "tunnel.build" + t;
-        int timeout = getEvents(ctx, pfx + "Expire", 10*60*1000);
-        int reject = getEvents(ctx, pfx + "Reject", 10*60*1000);
-        int accept = getEvents(ctx, pfx + "Success", 10*60*1000);
+        int timeout = getEvents(pfx + "Expire", 10*60*1000);
+        int reject = getEvents(pfx + "Reject", 10*60*1000);
+        int accept = getEvents(pfx + "Success", 10*60*1000);
         if (accept + reject + timeout <= 0)
             return 0;
         double pct = (double)(reject + timeout) / (accept + reject + timeout);
@@ -166,7 +171,7 @@ class ExploratoryPeerSelector extends TunnelPeerSelector {
     }
 
     /** Use current + last to get more recent and smoother data */
-    private static int getEvents(RouterContext ctx, String stat, long period) {
+    private int getEvents(String stat, long period) {
         RateStat rs = ctx.statManager().getRate(stat);
         if (rs == null)
             return 0;
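Note: the normalization kept unchanged above, return (100 * (e-c)) / (100-c), rescales the exploratory build failure rate e against the client build failure rate c so that only the excess exploratory failures count. A quick worked example with hypothetical numbers (illustration only, not measured data):

    // c = client build fail pct      = 20
    // e = exploratory build fail pct = 60
    // normalized = (100 * (60 - 20)) / (100 - 20) = 4000 / 80 = 50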
@@ -31,6 +31,12 @@ import net.i2p.util.VersionComparator;
  * Todo: there's nothing non-static in here
  */
 public abstract class TunnelPeerSelector {
+    protected final RouterContext ctx;
+
+    protected TunnelPeerSelector(RouterContext context) {
+        ctx = context;
+    }
+
     /**
      * Which peers should go into the next tunnel for the given settings?
      *
@@ -40,12 +46,12 @@ public abstract class TunnelPeerSelector {
      * to build through, and the settings reject 0 hop tunnels, this will
      * return null.
      */
-    public abstract List<Hash> selectPeers(RouterContext ctx, TunnelPoolSettings settings);
+    public abstract List<Hash> selectPeers(TunnelPoolSettings settings);
 
     /**
      * @return randomized number of hops 0-7, not including ourselves
      */
-    protected int getLength(RouterContext ctx, TunnelPoolSettings settings) {
+    protected int getLength(TunnelPoolSettings settings) {
         int length = settings.getLength();
         int override = settings.getLengthOverride();
         if (override >= 0) {
@@ -109,7 +115,7 @@ public abstract class TunnelPeerSelector {
      * Needs analysis and testing
      * @return should always be false
      */
-    protected List<Hash> selectExplicit(RouterContext ctx, TunnelPoolSettings settings, int length) {
+    protected List<Hash> selectExplicit(TunnelPoolSettings settings, int length) {
         String peers = null;
         Properties opts = settings.getUnknownOptions();
         if (opts != null)
@@ -173,7 +179,7 @@ public abstract class TunnelPeerSelector {
     /**
      * Pick peers that we want to avoid
      */
-    public Set<Hash> getExclude(RouterContext ctx, boolean isInbound, boolean isExploratory) {
+    public Set<Hash> getExclude(boolean isInbound, boolean isExploratory) {
         // we may want to update this to skip 'hidden' or 'unreachable' peers, but that
         // isn't safe, since they may publish one set of routerInfo to us and another to
         // other peers. the defaults for filterUnreachable has always been to return false,
@@ -196,7 +202,7 @@ public abstract class TunnelPeerSelector {
         peers.addAll(ctx.profileOrganizer().selectPeersRecentlyRejecting());
         peers.addAll(ctx.tunnelManager().selectPeersInTooManyTunnels());
         // if (false && filterUnreachable(ctx, isInbound, isExploratory)) {
-        if (filterUnreachable(ctx, isInbound, isExploratory)) {
+        if (filterUnreachable(isInbound, isExploratory)) {
             // NOTE: filterUnreachable returns true for inbound, false for outbound
             // This is the only use for getPeersByCapability? And the whole set of datastructures in PeerManager?
             Collection<Hash> caps = ctx.peerManager().getPeersByCapability(Router.CAPABILITY_UNREACHABLE);
@@ -439,7 +445,7 @@ public abstract class TunnelPeerSelector {
      * do we want to skip peers who haven't been up for long?
      * @return true for inbound, false for outbound, unless configured otherwise
      */
-    protected boolean filterUnreachable(RouterContext ctx, boolean isInbound, boolean isExploratory) {
+    protected boolean filterUnreachable(boolean isInbound, boolean isExploratory) {
         boolean def = false;
         String val = null;
 
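Note: with the base class now holding the context in a protected final field and exposing a protected constructor, every selector subclass takes the same shape. A minimal sketch of a hypothetical subclass, shown only to illustrate the new contract after these hunks (the class name is invented; it is not part of the patch):

    // Hypothetical subclass, illustration only.
    class MinimalPeerSelector extends TunnelPeerSelector {

        public MinimalPeerSelector(RouterContext context) {
            super(context);                   // context lands in the inherited 'ctx' field
        }

        public List<Hash> selectPeers(TunnelPoolSettings settings) {
            int length = getLength(settings); // helpers no longer take a RouterContext
            if (length <= 0)
                return null;
            Set<Hash> exclude = getExclude(settings.isInbound(), false);
            Set<Hash> matches = new HashSet(length);
            ctx.profileOrganizer().selectFastPeers(length, exclude, matches, 0);
            matches.remove(ctx.routerHash());
            return new ArrayList(matches);
        }
    }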
@@ -1086,13 +1086,14 @@ public class TunnelPool {
                         for (int i = len - 1; i >= 0; i--) {
                             peers.add(ti.getPeer(i));
                         }
+                        break;
                     }
                 }
             }
         }
         if (peers == null) {
             setLengthOverride();
-            peers = _peerSelector.selectPeers(_context, settings);
+            peers = _peerSelector.selectPeers(settings);
         }
 
         if ( (peers == null) || (peers.isEmpty()) ) {
@@ -41,6 +41,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
     private TunnelPool _outboundExploratory;
     private final BuildExecutor _executor;
     private final BuildHandler _handler;
+    private final TunnelPeerSelector _clientPeerSelector;
     private boolean _isShutdown;
     private final int _numHandlerThreads;
     private static final long[] RATES = { 60*1000, 10*60*1000l, 60*60*1000l };
@@ -60,6 +61,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
 
         _clientInboundPools = new ConcurrentHashMap(4);
         _clientOutboundPools = new ConcurrentHashMap(4);
+        _clientPeerSelector = new ClientPeerSelector(ctx);
 
         _executor = new BuildExecutor(ctx, this);
         I2PThread execThread = new I2PThread(_executor, "BuildExecutor", true);
@@ -407,8 +409,6 @@ public class TunnelPoolManager implements TunnelManagerFacade {
         settings.getOutboundSettings().setDestination(dest);
         TunnelPool inbound = null;
         TunnelPool outbound = null;
-        // should we share the clientPeerSelector across both inbound and outbound?
-        // or just one for all clients? why separate?
 
         boolean delayOutbound = false;
         // synch with removeTunnels() below
@@ -416,7 +416,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
             inbound = _clientInboundPools.get(dest);
             if (inbound == null) {
                 inbound = new TunnelPool(_context, this, settings.getInboundSettings(),
-                                         new ClientPeerSelector());
+                                         _clientPeerSelector);
                 _clientInboundPools.put(dest, inbound);
             } else {
                 inbound.setSettings(settings.getInboundSettings());
@@ -424,7 +424,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
             outbound = _clientOutboundPools.get(dest);
             if (outbound == null) {
                 outbound = new TunnelPool(_context, this, settings.getOutboundSettings(),
-                                          new ClientPeerSelector());
+                                          _clientPeerSelector);
                 _clientOutboundPools.put(dest, outbound);
                 delayOutbound = true;
             } else {
@@ -511,7 +511,7 @@ public class TunnelPoolManager implements TunnelManagerFacade {
             t.setDaemon(true);
             t.start();
         }
-        ExploratoryPeerSelector selector = new ExploratoryPeerSelector();
+        ExploratoryPeerSelector selector = new ExploratoryPeerSelector(_context);
 
         TunnelPoolSettings inboundSettings = new TunnelPoolSettings();
         inboundSettings.setIsExploratory(true);
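Note: the TunnelPoolManager hunks answer the question in the comment they remove. As far as these hunks show, the refactored selectors keep no state beyond the RouterContext, so a single ClientPeerSelector instance (_clientPeerSelector, created once in the constructor) is now shared by every client pool, inbound and outbound, instead of constructing a fresh selector for each pool.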