forked from I2P_Developers/i2p.i2p
remove unnecessary casts (eclipse)
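The two patterns removed throughout the diff below can be summarized in a minimal illustrative sketch (the class and member names here are invented for illustration, not taken from the I2P sources): a cast on the result of a lookup in a generically typed collection, and a (float)/(double) cast inside an arithmetic expression whose other operand already forces floating-point promotion. Eclipse flags both with its unnecessary-cast compiler warning.

    import java.util.HashMap;
    import java.util.Map;

    public class RedundantCastExample {
        // Declared with type parameters, so get() already returns String.
        private final Map<String, String> comments = new HashMap<String, String>();

        public String lookup(String key) {
            // before: String comment = (String) comments.get(key);
            String comment = comments.get(key);   // cast removed, same behavior
            return comment;
        }

        public float rate(int sent, long time) {
            // before: return ((float) sent * 1000f) / (float) time;
            // The 1000f literal already makes the whole expression float-typed.
            return (sent * 1000f) / time;
        }
    }

Removing such casts does not change runtime behavior; the only case needing care is integer division, where dropping a (float) cast with no float operand left in the expression would change the result.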
@@ -123,7 +123,7 @@ public class Blocklist {
         for (Iterator<Hash> iter = _peerBlocklist.keySet().iterator(); iter.hasNext(); ) {
             Hash peer = iter.next();
             String reason;
-            String comment = (String) _peerBlocklist.get(peer);
+            String comment = _peerBlocklist.get(peer);
             if (comment != null)
                 reason = _x("Banned by router hash: {0}");
             else
@@ -561,7 +561,7 @@ public class Router implements RouterClock.ClockShiftListener {
     public void addCapabilities(RouterInfo ri) {
         int bwLim = Math.min(_context.bandwidthLimiter().getInboundKBytesPerSecond(),
                              _context.bandwidthLimiter().getOutboundKBytesPerSecond());
-        bwLim = (int)(((float)bwLim) * getSharePercentage());
+        bwLim = (int)(bwLim * getSharePercentage());
         if (_log.shouldLog(Log.INFO))
             _log.info("Adding capabilities w/ bw limit @ " + bwLim, new Exception("caps"));
 
@@ -381,7 +381,7 @@ class RouterThrottleImpl implements RouterThrottle {
         // Now see if 1m rates are too high
         long overage = Math.max(used1mIn - (maxKBpsIn*1024), used1mOut - (maxKBpsOut*1024));
         if ( (overage > 0) &&
-             ((overage/(float)(maxKBps*1024f)) > _context.random().nextFloat()) ) {
+             ((overage/(maxKBps*1024f)) > _context.random().nextFloat()) ) {
             if (_log.shouldLog(Log.WARN)) _log.warn("Reject tunnel, 1m rate (" + overage + " over) indicates overload.");
             setTunnelStatus(LIMIT_STR);
             return false;
@@ -213,7 +213,7 @@ public class StatisticsManager implements Service {
                 double avgFrequency = rate.getLifetimeEventCount() / (double)numPeriods;
                 buf.append(num(avgFrequency)).append(';');
                 buf.append(num(rate.getExtremeEventCount())).append(';');
-                buf.append(num((double)rate.getLifetimeEventCount())).append(';');
+                buf.append(num(rate.getLifetimeEventCount())).append(';');
             }
         }
         return buf.toString();
@@ -568,7 +568,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
 
         RepublishLeaseSetJob j = null;
         synchronized (_publishingLeaseSets) {
-            j = (RepublishLeaseSetJob)_publishingLeaseSets.get(h);
+            j = _publishingLeaseSets.get(h);
             if (j == null) {
                 j = new RepublishLeaseSetJob(_context, this, h);
                 _publishingLeaseSets.put(h, j);
@@ -920,7 +920,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         boolean isNew = true;
         SearchJob searchJob = null;
         synchronized (_activeRequests) {
-            searchJob = (SearchJob)_activeRequests.get(key);
+            searchJob = _activeRequests.get(key);
             if (searchJob == null) {
                 searchJob = new SearchJob(_context, this, key, onFindJob, onFailedLookupJob,
                                           timeoutMs, true, isLease);
@@ -285,9 +285,9 @@ public class PeerProfile {
 
         // weighted since we want to let the average grow quickly and shrink slowly
         if (ms < _tunnelTestResponseTimeAvg)
-            _tunnelTestResponseTimeAvg = 0.95*_tunnelTestResponseTimeAvg + .05*(double)ms;
+            _tunnelTestResponseTimeAvg = 0.95*_tunnelTestResponseTimeAvg + .05*ms;
         else
-            _tunnelTestResponseTimeAvg = 0.75*_tunnelTestResponseTimeAvg + .25*(double)ms;
+            _tunnelTestResponseTimeAvg = 0.75*_tunnelTestResponseTimeAvg + .25*ms;
 
         if (_log.shouldLog(Log.INFO))
             _log.info("Updating tunnel test time for " + _peer.toBase64().substring(0,6)
@@ -307,7 +307,7 @@ public class PeerProfile {
         double rv = 0;
         for (int i = 0; i < THROUGHPUT_COUNT; i++)
             rv += _peakThroughput[i];
-        rv /= (60d*1024d*(double)THROUGHPUT_COUNT);
+        rv /= (60d*1024d*THROUGHPUT_COUNT);
         return rv;
     }
     public void setPeakThroughputKBps(double kBps) {
@@ -338,7 +338,7 @@ public class PeerProfile {
         double rv = 0;
         for (int i = 0; i < THROUGHPUT_COUNT; i++)
             rv += _peakTunnelThroughput[i];
-        rv /= (10d*60d*1024d*(double)THROUGHPUT_COUNT);
+        rv /= (10d*60d*1024d*THROUGHPUT_COUNT);
         return rv;
     }
     public void setPeakTunnelThroughputKBps(double kBps) {
@@ -381,7 +381,7 @@ public class PeerProfile {
         double rv = 0;
         for (int i = 0; i < THROUGHPUT_COUNT; i++)
             rv += _peakTunnel1mThroughput[i];
-        rv /= (60d*1024d*(double)THROUGHPUT_COUNT);
+        rv /= (60d*1024d*THROUGHPUT_COUNT);
         return rv;
     }
     public void setPeakTunnel1mThroughputKBps(double kBps) {
@@ -613,7 +613,7 @@ public class ProfileOrganizer {
             start = _context.random().nextInt(_notFailingPeersList.size());
             for (int i = 0; i < _notFailingPeersList.size() && selected.size() < needed; i++) {
                 int curIndex = (i+start) % _notFailingPeersList.size();
-                Hash cur = (Hash)_notFailingPeersList.get(curIndex);
+                Hash cur = _notFailingPeersList.get(curIndex);
                 if (matches.contains(cur) ||
                     (exclude != null && exclude.contains(cur))) {
                     if (_log.shouldLog(Log.DEBUG))
@@ -373,13 +373,13 @@ public class FIFOBandwidthLimiter {
         _lastStatsUpdated = now;
 
         if (_sendBps <= 0)
-            _sendBps = ((float)sent*1000f)/(float)time;
+            _sendBps = (sent*1000f)/time;
         else
-            _sendBps = (0.9f)*_sendBps + (0.1f)*((float)sent*1000f)/(float)time;
+            _sendBps = (0.9f)*_sendBps + (0.1f)*(sent*1000f)/time;
         if (_recvBps <= 0)
-            _recvBps = ((float)recv*1000f)/(float)time;
+            _recvBps = (recv*1000f)/time;
         else
-            _recvBps = (0.9f)*_recvBps + (0.1f)*((float)recv*1000)/(float)time;
+            _recvBps = (0.9f)*_recvBps + (0.1f)*((float)recv*1000)/time;
 
         // warning, getStatLog() can be null
         //if (_log.shouldLog(Log.WARN)) {
@@ -395,12 +395,12 @@ public class FIFOBandwidthLimiter {
         //if (_sendBps15s <= 0)
         // _sendBps15s = (0.045f)*((float)sent*15*1000f)/(float)time;
         //else
-            _sendBps15s = (0.955f)*_sendBps15s + (0.045f)*((float)sent*1000f)/(float)time;
+            _sendBps15s = (0.955f)*_sendBps15s + (0.045f)*(sent*1000f)/time;
 
         //if (_recvBps15s <= 0)
         // _recvBps15s = (0.045f)*((float)recv*15*1000f)/(float)time;
         //else
-            _recvBps15s = (0.955f)*_recvBps15s + (0.045f)*((float)recv*1000)/(float)time;
+            _recvBps15s = (0.955f)*_recvBps15s + (0.045f)*((float)recv*1000)/time;
 
         // warning, getStatLog() can be null
         //if (_log.shouldLog(Log.WARN)) {
@@ -356,7 +356,7 @@ class GeoIP {
         int rv = 0;
         for (int i = 0; i < 4; i++)
             rv |= (ip[i] & 0xff) << ((3-i)*8);
-        return ((long) rv) & 0xffffffffl;
+        return rv & 0xffffffffl;
     }
 
     /**
@@ -271,7 +271,7 @@ public class OutboundMessageRegistry {
         //for (Iterator<MessageSelector> iter = _selectors.iterator(); iter.hasNext(); ) {
         // MessageSelector sel = iter.next();
         for (int i = 0; i < _selectors.size(); i++) {
-            MessageSelector sel = (MessageSelector)_selectors.get(i);
+            MessageSelector sel = _selectors.get(i);
             long expiration = sel.getExpiration();
             if (expiration <= now) {
                 removing.add(sel);
@@ -507,7 +507,7 @@ public abstract class TransportImpl implements Transport {
     public boolean isUnreachable(Hash peer) {
         long now = _context.clock().now();
         synchronized (_unreachableEntries) {
-            Long when = (Long)_unreachableEntries.get(peer);
+            Long when = _unreachableEntries.get(peer);
             if (when == null) return false;
             if (when.longValue() + UNREACHABLE_PERIOD < now) {
                 _unreachableEntries.remove(peer);
@@ -543,7 +543,7 @@ public abstract class TransportImpl implements Transport {
         synchronized (_unreachableEntries) {
             for (Iterator iter = _unreachableEntries.keySet().iterator(); iter.hasNext(); ) {
                 Hash peer = (Hash)iter.next();
-                Long when = (Long)_unreachableEntries.get(peer);
+                Long when = _unreachableEntries.get(peer);
                 if (when.longValue() + UNREACHABLE_PERIOD < now)
                     iter.remove();
             }
@@ -999,8 +999,8 @@ class NTCPConnection implements FIFOBandwidthLimiter.CompleteListener {
         _lastBytesReceived = totR;
         _lastRateUpdated = now;
 
-        _sendBps = (0.9f)*_sendBps + (0.1f)*((float)sent*1000f)/(float)time;
-        _recvBps = (0.9f)*_recvBps + (0.1f)*((float)recv*1000)/(float)time;
+        _sendBps = (0.9f)*_sendBps + (0.1f)*(sent*1000f)/time;
+        _recvBps = (0.9f)*_recvBps + (0.1f)*((float)recv*1000)/time;
 
         // Maintain an approximate average with a 15-second halflife
         // Weights (0.955 and 0.045) are tuned so that transition between two values (e.g. 0..10)
@@ -163,7 +163,7 @@ public class NTCPTransport extends TransportImpl {
             //_context.shitlist().unshitlistRouter(con.getRemotePeer().calculateHash());
             NTCPConnection old = null;
             synchronized (_conLock) {
-                old = (NTCPConnection)_conByIdent.put(con.getRemotePeer().calculateHash(), con);
+                old = _conByIdent.put(con.getRemotePeer().calculateHash(), con);
             }
             if (old != null) {
                 if (_log.shouldLog(Log.DEBUG))
@@ -181,7 +181,7 @@ public class NTCPTransport extends TransportImpl {
         NTCPConnection con = null;
         boolean isNew = false;
         synchronized (_conLock) {
-            con = (NTCPConnection)_conByIdent.get(ih);
+            con = _conByIdent.get(ih);
             if (con == null) {
                 isNew = true;
                 RouterAddress addr = msg.getTarget().getTargetAddress(STYLE);
@@ -356,7 +356,7 @@ public class NTCPTransport extends TransportImpl {
     @Override
     public boolean isEstablished(Hash dest) {
         synchronized (_conLock) {
-            NTCPConnection con = (NTCPConnection)_conByIdent.get(dest);
+            NTCPConnection con = _conByIdent.get(dest);
             return (con != null) && con.isEstablished() && !con.isClosed();
         }
     }
@@ -364,7 +364,7 @@ public class NTCPTransport extends TransportImpl {
     @Override
     public boolean isBacklogged(Hash dest) {
         synchronized (_conLock) {
-            NTCPConnection con = (NTCPConnection)_conByIdent.get(dest);
+            NTCPConnection con = _conByIdent.get(dest);
             return (con != null) && con.isEstablished() && con.tooBacklogged();
         }
     }
@@ -374,7 +374,7 @@ public class NTCPTransport extends TransportImpl {
         synchronized (_conLock) {
             RouterIdentity ident = con.getRemotePeer();
             if (ident != null)
-                removed = (NTCPConnection)_conByIdent.remove(ident.calculateHash());
+                removed = _conByIdent.remove(ident.calculateHash());
         }
         if ( (removed != null) && (removed != con) ) {// multiple cons, close 'em both
             if (_log.shouldLog(Log.WARN))
@@ -459,7 +459,7 @@ class PacketBuilder {
         int fullACKCount = 0;
         int partialACKCount = 0;
         for (int i = 0; i < ackBitfields.size(); i++) {
-            if (((ACKBitfield)ackBitfields.get(i)).receivedComplete())
+            if (ackBitfields.get(i).receivedComplete())
                 fullACKCount++;
             else
                 partialACKCount++;
@@ -496,7 +496,7 @@ class PacketBuilder {
         DataHelper.toLong(data, off, 1, partialACKCount);
         off++;
         for (int i = 0; i < ackBitfields.size(); i++) {
-            ACKBitfield bitfield = (ACKBitfield)ackBitfields.get(i);
+            ACKBitfield bitfield = ackBitfields.get(i);
             if (bitfield.receivedComplete()) continue;
             DataHelper.toLong(data, off, 4, bitfield.getMessageId());
             off += 4;
@@ -481,7 +481,7 @@ class PeerState {
      * A positive number means our clock is ahead of theirs.
      */
     public void adjustClockSkew(long skew) {
-        _clockSkew = (long) (0.9*(float)_clockSkew + 0.1*(float)(skew - (_rtt / 2)));
+        _clockSkew = (long) (0.9*_clockSkew + 0.1*(skew - (_rtt / 2)));
     }
 
     /** what is the current receive second, for congestion control? */
@@ -542,7 +542,7 @@ class PeerState {
         if (duration >= 1000) {
             _sendWindowBytesRemaining = _sendWindowBytes;
             _sendBytes += size;
-            _sendBps = (int)(0.9f*(float)_sendBps + 0.1f*((float)_sendBytes * (1000f/(float)duration)));
+            _sendBps = (int)(0.9f*_sendBps + 0.1f*(_sendBytes * (1000f/duration)));
             //if (isForACK) {
             // _sendACKBytes += size;
             // _sendACKBps = (int)(0.9f*(float)_sendACKBps + 0.1f*((float)_sendACKBytes * (1000f/(float)duration)));
@@ -628,7 +628,7 @@ class PeerState {
         long now = _context.clock().now();
         long duration = now - _receivePeriodBegin;
         if (duration >= 1000) {
-            _receiveBps = (int)(0.9f*(float)_receiveBps + 0.1f*((float)_receiveBytes * (1000f/(float)duration)));
+            _receiveBps = (int)(0.9f*_receiveBps + 0.1f*(_receiveBytes * (1000f/duration)));
             //if (isForACK)
             // _receiveACKBps = (int)(0.9f*(float)_receiveACKBps + 0.1f*((float)_receiveACKBytes * (1000f/(float)duration)));
             //_receiveACKBytes = 0;
@@ -1008,10 +1008,10 @@ class PeerState {
         // the faster we are going, the slower we want to reduce the rtt
         float scale = 0.1f;
         if (_sendBps > 0)
-            scale = ((float)lifetime) / (float)((float)lifetime + (float)_sendBps);
+            scale = lifetime / ((float)lifetime + (float)_sendBps);
         if (scale < 0.001f) scale = 0.001f;
 
-        _rtt = (int)(((float)_rtt)*(1.0f-scale) + (scale)*(float)lifetime);
+        _rtt = (int)(_rtt*(1.0f-scale) + (scale)*lifetime);
         _rto = _rtt + (_rttDeviation<<2);
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Recalculating timeouts w/ lifetime=" + lifetime + ": rtt=" + _rtt
@@ -331,7 +331,7 @@ class UDPPacketReader {
             off++;
             off += size;
         }
-        return (int)_message[off];
+        return _message[off];
     }
 
     public long readMessageId(int fragmentNum) {
@@ -459,7 +459,7 @@ class UDPPacketReader {
             off++;
             buf.append(" frag# ").append(fragNum);
             buf.append(" isLast? ").append(isLast);
-            buf.append(" info ").append((int)_message[off-1]);
+            buf.append(" info ").append(_message[off-1]);
             int size = ((int)DataHelper.fromLong(_message, off, 2)) & 0x3FFF;
             buf.append(" with ").append(size).append(" bytes");
             buf.append(' ');
@@ -242,7 +242,7 @@ class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
             target[offset] |= 1;
 
         if (_log.shouldLog(Log.DEBUG))
-            _log.debug("CONTROL: " + Integer.toHexString((int)target[offset]) + "/"
+            _log.debug("CONTROL: " + Integer.toHexString(target[offset]) + "/"
                        + Base64.encode(target, offset, 1) + " at offset " + offset);
 
         offset++;
@@ -136,7 +136,7 @@ public class TunnelCreatorConfig implements TunnelInfo {
         long timeSince = now - _peakThroughputLastCoallesce;
         if (timeSince >= 60*1000) {
             long tot = _peakThroughputCurrentTotal;
-            double normalized = (double)tot * 60d*1000d / (double)timeSince;
+            double normalized = tot * 60d*1000d / timeSince;
             _peakThroughputLastCoallesce = now;
             _peakThroughputCurrentTotal = 0;
             if (_context != null)
@@ -158,7 +158,7 @@ public class TunnelCreatorConfig implements TunnelInfo {
         double rv = 0;
         for (int i = 0; i < THROUGHPUT_COUNT; i++)
             rv += _peakThroughput[i];
-        rv /= (60d*1024d*(double)THROUGHPUT_COUNT);
+        rv /= (60d*1024d*THROUGHPUT_COUNT);
         return rv;
     }
     public void setPeakThroughputKBps(double kBps) {
@@ -275,7 +275,7 @@ abstract class BuildRequestor {
             log.debug("Build order: " + order + " for " + cfg);
 
         for (int i = 0; i < msg.getRecordCount(); i++) {
-            int hop = ((Integer)order.get(i)).intValue();
+            int hop = order.get(i).intValue();
             PublicKey key = null;
 
             if (BuildMessageGenerator.isBlank(cfg, hop)) {