* NetDB:
  - Increase RI publish interval to reduce the connection load on ffs
  - Save RI-last-published time; check it before publishing
  - Minor cleanups
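For context, a minimal sketch (not part of the commit) of the republish schedule that results from the constants introduced below. MIN_PUBLISH_DELAY, PUBLISH_DELAY, and the getDelay() arithmetic are taken from the diff; the class name and the main() harness are illustrative only.

// Sketch only: evaluates the publish-interval arithmetic from PublishLocalRouterInfoJob
// to show the resulting republish window. PublishDelaySketch itself is hypothetical.
public class PublishDelaySketch {
    // Constants as introduced in this commit
    static final long MIN_PUBLISH_DELAY = 25*60*1000;             // skip a re-store within 25 minutes
    static final long PUBLISH_DELAY = MIN_PUBLISH_DELAY * 5 / 3;  // 2,500,000 ms, about 41.7 minutes

    public static void main(String[] args) {
        // getDelay() in the diff returns PUBLISH_DELAY*3/4 plus a random value in [0, PUBLISH_DELAY/4)
        long minDelay = PUBLISH_DELAY * 3 / 4;          // 1,875,000 ms = 31.25 minutes
        long maxDelay = minDelay + PUBLISH_DELAY / 4;   // 2,500,000 ms, about 41.7 minutes
        System.out.printf("republish window: %.2f to %.2f minutes%n",
                          minDelay / 60000.0, maxDelay / 60000.0);
        // Both bounds stay above MIN_PUBLISH_DELAY (25 min) and well under the
        // 60-minute floodfill expiration noted in the new javadoc, versus the
        // previous window of 15 to 25 minutes (20 min * 3/4 + random 0..10 min).
    }
}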
@@ -1,3 +1,8 @@
+2013-10-23 zzz
+ * NetDB:
+   - Increase RI publish interval to reduce the connection load on ffs
+   - Save RI-last-published time; check it before publishing
+
 2013-10-19 zzz
  * NetDB:
   - Reinstate ExpireRoutersJob
@@ -63,6 +63,12 @@ public abstract class NetworkDatabaseFacade implements Service {
     public abstract void unpublish(LeaseSet localLeaseSet);
     public abstract void fail(Hash dbEntry);
 
+    /**
+     *  The last time we successfully published our RI.
+     *  @since 0.9.9
+     */
+    public long getLastRouterInfoPublishTime() { return 0; }
+
     public abstract Set<Hash> getAllRouters();
     public int getKnownRouters() { return 0; }
     public int getKnownLeaseSets() { return 0; }
@@ -18,7 +18,7 @@ public class RouterVersion {
     /** deprecated */
     public final static String ID = "Monotone";
     public final static String VERSION = CoreVersion.VERSION;
-    public final static long BUILD = 6;
+    public final static long BUILD = 7;
 
     /** for example "-test" */
     public final static String EXTRA = "";
@@ -26,15 +26,27 @@ import net.i2p.util.Log;
  * send to the floodfills until the second time it runs.
  */
 public class PublishLocalRouterInfoJob extends JobImpl {
-    private Log _log;
-    final static long PUBLISH_DELAY = 20*60*1000;
+    private final Log _log;
+
+    /**
+     *  Don't store if somebody else stored it recently.
+     */
+    private static final long MIN_PUBLISH_DELAY = 25*60*1000;
+
+    /**
+     *  Too short and the network puts a big connection load on the
+     *  floodfills since we store directly.
+     *  Too long and the floodfill will drop us - timeout is 60 minutes.
+     */
+    private static final long PUBLISH_DELAY = MIN_PUBLISH_DELAY * 5 / 3;
+
     /** this needs to be long enough to give us time to start up,
         but less than 20m (when we start accepting tunnels and could be a IBGW)
         Actually no, we need this soon if we are a new router or
         other routers have forgotten about us, else
         we can't build IB exploratory tunnels.
      */
-    final static long FIRST_TIME_DELAY = 90*1000;
+    private static final long FIRST_TIME_DELAY = 90*1000;
     boolean _notFirstTime;
 
     public PublishLocalRouterInfoJob(RouterContext ctx) {
@@ -44,6 +56,13 @@ public class PublishLocalRouterInfoJob extends JobImpl {
 
     public String getName() { return "Publish Local Router Info"; }
     public void runJob() {
+        long last = getContext().netDb().getLastRouterInfoPublishTime();
+        long now = getContext().clock().now();
+        if (last + MIN_PUBLISH_DELAY > now) {
+            long delay = getDelay();
+            requeue(last + delay);
+            return;
+        }
         RouterInfo ri = new RouterInfo(getContext().router().getRouterInfo());
         if (_log.shouldLog(Log.DEBUG))
             _log.debug("Old routerInfo contains " + ri.getAddresses().size()
@@ -79,10 +98,15 @@ public class PublishLocalRouterInfoJob extends JobImpl {
             _log.error("Error signing the updated local router info!", dfe);
         }
         if (_notFirstTime) {
-            requeue((PUBLISH_DELAY * 3 / 4) + getContext().random().nextInt((int)PUBLISH_DELAY / 2));
+            long delay = getDelay();
+            requeue(delay);
         } else {
             requeue(FIRST_TIME_DELAY);
             _notFirstTime = true;
         }
     }
+
+    private long getDelay() {
+        return (PUBLISH_DELAY * 3 / 4) + getContext().random().nextInt((int)PUBLISH_DELAY / 4);
+    }
 }
@@ -212,9 +212,6 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
                 continue;
             DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
             msg.setEntry(ds);
-            msg.setReplyGateway(null);
-            msg.setReplyToken(0);
-            msg.setReplyTunnel(null);
             OutNetMessage m = new OutNetMessage(_context, msg, _context.clock().now()+FLOOD_TIMEOUT, FLOOD_PRIORITY, target);
             // note send failure but don't give credit on success
             // might need to change this
@@ -232,7 +229,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
 
     /** note in the profile that the store failed */
     private static class FloodFailedJob extends JobImpl {
-        private Hash _peer;
+        private final Hash _peer;
 
         public FloodFailedJob(RouterContext ctx, Hash peer) {
             super(ctx);
@@ -207,9 +207,8 @@ class FloodfillVerifyStoreJob extends JobImpl {
         _facade.verifyFinished(_key);
         if (_message instanceof DatabaseStoreMessage) {
             // Verify it's as recent as the one we sent
-            boolean success = false;
             DatabaseStoreMessage dsm = (DatabaseStoreMessage)_message;
-            success = dsm.getEntry().getDate() >= _published;
+            boolean success = dsm.getEntry().getDate() >= _published;
             if (success) {
                 // store ok, w00t!
                 getContext().profileManager().dbLookupSuccessful(_target, delay);
@@ -218,6 +217,8 @@ class FloodfillVerifyStoreJob extends JobImpl {
                 getContext().statManager().addRateData("netDb.floodfillVerifyOK", delay, 0);
                 if (_log.shouldLog(Log.INFO))
                     _log.info("Verify success for " + _key);
+                if (_isRouterInfo)
+                    _facade.routerInfoPublishSuccessful();
                 return;
             }
             if (_log.shouldLog(Log.WARN))
@@ -57,6 +57,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
     protected final PeerSelector _peerSelector;
     protected final RouterContext _context;
     private final ReseedChecker _reseedChecker;
+    private volatile long _lastRIPublishTime;
 
     /**
      *  Map of Hash to RepublishLeaseSetJob for leases we're already managing.
@@ -586,6 +587,23 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
             store(h, localRouterInfo);
     }
 
+    /**
+     *  Set the last time we successfully published our RI.
+     *  @since 0.9.9
+     */
+    void routerInfoPublishSuccessful() {
+        _lastRIPublishTime = _context.clock().now();
+    }
+
+    /**
+     *  The last time we successfully published our RI.
+     *  @since 0.9.9
+     */
+    @Override
+    public long getLastRouterInfoPublishTime() {
+        return _lastRIPublishTime;
+    }
+
     /**
      * Persist the local router's info (as updated) into netDb/my.info, since
      * ./router.info isn't always updated.  This also allows external applications
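Taken together, the new pieces form a small feedback loop: FloodfillVerifyStoreJob calls routerInfoPublishSuccessful() when a store of our RouterInfo verifies, and PublishLocalRouterInfoJob consults getLastRouterInfoPublishTime() before publishing again. A self-contained sketch of that interaction follows; only those two method names and MIN_PUBLISH_DELAY mirror the diff, while the Facade class, the explicit "now" parameter, and the main() harness are simplified stand-ins.

// Illustrative only: a stripped-down model of the publish/verify hand-off added in this commit.
public class RiPublishFlowSketch {
    static final long MIN_PUBLISH_DELAY = 25*60*1000;   // from PublishLocalRouterInfoJob

    /** Simplified stand-in for the netDb facade; the real setter reads the clock itself. */
    static class Facade {
        private volatile long lastRIPublishTime;
        long getLastRouterInfoPublishTime() { return lastRIPublishTime; }
        void routerInfoPublishSuccessful(long now) { lastRIPublishTime = now; }
    }

    /** The skip test performed at the top of runJob(): true means requeue without publishing. */
    static boolean shouldSkipPublish(Facade facade, long now) {
        return facade.getLastRouterInfoPublishTime() + MIN_PUBLISH_DELAY > now;
    }

    public static void main(String[] args) {
        Facade facade = new Facade();
        long t0 = System.currentTimeMillis();
        System.out.println(shouldSkipPublish(facade, t0));                  // false: never published yet
        facade.routerInfoPublishSuccessful(t0);                             // verify job reports success
        System.out.println(shouldSkipPublish(facade, t0 + 10*60*1000));     // true: only 10 minutes ago
        System.out.println(shouldSkipPublish(facade, t0 + 30*60*1000));     // false: past the 25-minute floor
    }
}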
@@ -164,7 +164,7 @@ class PersistentDataStore extends TransientDataStore {
             super(PersistentDataStore.this._context);
             _key = key;
         }
-        public String getName() { return "Remove Key"; }
+        public String getName() { return "Delete RI file"; }
         public void runJob() {
             if (_log.shouldLog(Log.INFO))
                 _log.info("Removing key " + _key /* , getAddedBy() */);
@@ -33,11 +33,11 @@ import net.i2p.util.VersionComparator;
  */
 class StoreJob extends JobImpl {
     protected final Log _log;
-    private KademliaNetworkDatabaseFacade _facade;
+    private final KademliaNetworkDatabaseFacade _facade;
     protected final StoreState _state;
     private final Job _onSuccess;
     private final Job _onFailure;
-    private long _timeoutMs;
+    private final long _timeoutMs;
     private final long _expiration;
     private final PeerSelector _peerSelector;
 