forked from I2P_Developers/i2p.i2p
* Netdb: Add a job to refresh all the old router infos at startup,
to speed integration
This commit is contained in:
@@ -18,7 +18,7 @@ public class RouterVersion {
|
||||
/** deprecated */
|
||||
public final static String ID = "Monotone";
|
||||
public final static String VERSION = CoreVersion.VERSION;
|
||||
public final static long BUILD = 18;
|
||||
public final static long BUILD = 19;
|
||||
|
||||
/** for example "-test" */
|
||||
public final static String EXTRA = "";
|
||||
|
@@ -65,6 +65,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
|
||||
super.startup();
|
||||
_context.jobQueue().addJob(new FloodfillMonitorJob(_context, this));
|
||||
_lookupThrottler = new LookupThrottler();
|
||||
|
||||
// refresh old routers
|
||||
Job rrj = new RefreshRoutersJob(_context, this);
|
||||
rrj.getTiming().setStartAfter(_context.clock().now() + 5*60*1000);
|
||||
_context.jobQueue().addJob(rrj);
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@@ -0,0 +1,82 @@
|
||||
package net.i2p.router.networkdb.kademlia;
|
||||
|
||||
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Set;

import net.i2p.data.Hash;
import net.i2p.data.RouterInfo;
import net.i2p.router.JobImpl;
import net.i2p.router.RouterContext;
import net.i2p.util.Log;
|
||||
|
||||
/**
|
||||
* Go through all the routers once, after startup, and refetch their router infos.
|
||||
* This should be run once after startup (and preferably after any reseed is complete,
|
||||
* but we don't have any indication when that is).
|
||||
* This will help routers that start after being shutdown for many days or weeks,
|
||||
* as well as newly-reseeded routers, since
|
||||
* validate() in KNDF doesn't start failing and refetching until the router has been
|
||||
* up for an hour.
|
||||
* To improve integration even more, we fetch the floodfills first.
|
||||
* Ideally this should complete within the first half-hour of uptime.
|
||||
*
|
||||
* @since 0.8.8
|
||||
*/
|
||||
class RefreshRoutersJob extends JobImpl {
|
||||
private final Log _log;
|
||||
private final FloodfillNetworkDatabaseFacade _facade;
|
||||
private List<Hash> _routers;
|
||||
|
||||
/** rerun fairly often. 1500 routers in 50 minutes */
|
||||
private final static long RERUN_DELAY_MS = 2*1000;
|
||||
private final static long EXPIRE = 60*60*1000;
|
||||
|
||||
public RefreshRoutersJob(RouterContext ctx, FloodfillNetworkDatabaseFacade facade) {
|
||||
super(ctx);
|
||||
_log = ctx.logManager().getLog(RefreshRoutersJob.class);
|
||||
_facade = facade;
|
||||
}
|
||||
|
||||
public String getName() { return "Refresh Routers Job"; }
|
||||
|
||||
public void runJob() {
|
||||
if (_facade.isInitialized()) {
|
||||
if (_routers == null) {
|
||||
// make a list of all routers, floodfill first
|
||||
_routers = _facade.getFloodfillPeers();
|
||||
int ff = _routers.size();
|
||||
Set<Hash> all = _facade.getAllRouters();
|
||||
all.removeAll(_routers);
|
||||
int non = all.size();
|
||||
_routers.addAll(all);
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("To check: " + ff + " floodfills and " + non + " non-floodfills");
|
||||
}
|
||||
if (_routers.isEmpty()) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Finished");
|
||||
return;
|
||||
}
|
||||
long expire = getContext().clock().now() - EXPIRE;
|
||||
for (Iterator<Hash> iter = _routers.iterator(); iter.hasNext(); ) {
|
||||
Hash h = iter.next();
|
||||
iter.remove();
|
||||
if (h.equals(getContext().routerHash()))
|
||||
continue;
|
||||
if (_log.shouldLog(Log.DEBUG))
|
||||
_log.debug("Checking " + h);
|
||||
RouterInfo ri = _facade.lookupRouterInfoLocally(h);
|
||||
if (ri == null)
|
||||
continue;
|
||||
if (ri.getPublished() < expire) {
|
||||
if (_log.shouldLog(Log.INFO))
|
||||
_log.info("Refreshing " + h);
|
||||
_facade.search(h, null, null, 15*1000, false);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
requeue(RERUN_DELAY_MS);
|
||||
}
|
||||
}
|
Reference in New Issue
Block a user