Merge of '74266b0afe4ef1abef923c8389fb47263b9a39e1' and '9a62d1aa11b74d835ec795c0a303bf5c2ebc2793'
This commit is contained in:
z3d
2009-10-16 20:16:29 +00:00
23 changed files with 179 additions and 108 deletions

View File

@ -694,6 +694,9 @@ public class SnarkManager implements Snark.CompleteListener {
} }
} }
/**
* "name", "announceURL=websiteURL" pairs
*/
private static final String DEFAULT_TRACKERS[] = { private static final String DEFAULT_TRACKERS[] = {
// "Postman", "http://YRgrgTLGnbTq2aZOZDJQ~o6Uk5k6TK-OZtx0St9pb0G-5EGYURZioxqYG8AQt~LgyyI~NCj6aYWpPO-150RcEvsfgXLR~CxkkZcVpgt6pns8SRc3Bi-QSAkXpJtloapRGcQfzTtwllokbdC-aMGpeDOjYLd8b5V9Im8wdCHYy7LRFxhEtGb~RL55DA8aYOgEXcTpr6RPPywbV~Qf3q5UK55el6Kex-6VCxreUnPEe4hmTAbqZNR7Fm0hpCiHKGoToRcygafpFqDw5frLXToYiqs9d4liyVB-BcOb0ihORbo0nS3CLmAwZGvdAP8BZ7cIYE3Z9IU9D1G8JCMxWarfKX1pix~6pIA-sp1gKlL1HhYhPMxwyxvuSqx34o3BqU7vdTYwWiLpGM~zU1~j9rHL7x60pVuYaXcFQDR4-QVy26b6Pt6BlAZoFmHhPcAuWfu-SFhjyZYsqzmEmHeYdAwa~HojSbofg0TMUgESRXMw6YThK1KXWeeJVeztGTz25sL8AAAA.i2p/announce.php=http://tracker.postman.i2p/" // "Postman", "http://YRgrgTLGnbTq2aZOZDJQ~o6Uk5k6TK-OZtx0St9pb0G-5EGYURZioxqYG8AQt~LgyyI~NCj6aYWpPO-150RcEvsfgXLR~CxkkZcVpgt6pns8SRc3Bi-QSAkXpJtloapRGcQfzTtwllokbdC-aMGpeDOjYLd8b5V9Im8wdCHYy7LRFxhEtGb~RL55DA8aYOgEXcTpr6RPPywbV~Qf3q5UK55el6Kex-6VCxreUnPEe4hmTAbqZNR7Fm0hpCiHKGoToRcygafpFqDw5frLXToYiqs9d4liyVB-BcOb0ihORbo0nS3CLmAwZGvdAP8BZ7cIYE3Z9IU9D1G8JCMxWarfKX1pix~6pIA-sp1gKlL1HhYhPMxwyxvuSqx34o3BqU7vdTYwWiLpGM~zU1~j9rHL7x60pVuYaXcFQDR4-QVy26b6Pt6BlAZoFmHhPcAuWfu-SFhjyZYsqzmEmHeYdAwa~HojSbofg0TMUgESRXMw6YThK1KXWeeJVeztGTz25sL8AAAA.i2p/announce.php=http://tracker.postman.i2p/"
// , "eBook", "http://E71FRom6PZNEqTN2Lr8P-sr23b7HJVC32KoGnVQjaX6zJiXwhJy2HsXob36Qmj81TYFZdewFZa9mSJ533UZgGyQkXo2ahctg82JKYZfDe5uDxAn1E9YPjxZCWJaFJh0S~UwSs~9AZ7UcauSJIoNtpxrtbmRNVFLqnkEDdLZi26TeucfOmiFmIWnVblLniWv3tG1boE9Abd-6j3FmYVrRucYuepAILYt6katmVNOk6sXmno1Eynrp~~MBuFq0Ko6~jsc2E2CRVYXDhGHEMdt-j6JUz5D7S2RIVzDRqQyAZLKJ7OdQDmI31przzmne1vOqqqLC~1xUumZVIvF~yOeJUGNjJ1Vx0J8i2BQIusn1pQJ6UCB~ZtZZLQtEb8EPVCfpeRi2ri1M5CyOuxN0V5ekmPHrYIBNevuTCRC26NP7ZS5VDgx1~NaC3A-CzJAE6f1QXi0wMI9aywNG5KGzOPifcsih8eyGyytvgLtrZtV7ykzYpPCS-rDfITncpn5hliPUAAAA.i2p/pub/bt/announce.php=http://de-ebook-archiv.i2p/pub/bt/" // , "eBook", "http://E71FRom6PZNEqTN2Lr8P-sr23b7HJVC32KoGnVQjaX6zJiXwhJy2HsXob36Qmj81TYFZdewFZa9mSJ533UZgGyQkXo2ahctg82JKYZfDe5uDxAn1E9YPjxZCWJaFJh0S~UwSs~9AZ7UcauSJIoNtpxrtbmRNVFLqnkEDdLZi26TeucfOmiFmIWnVblLniWv3tG1boE9Abd-6j3FmYVrRucYuepAILYt6katmVNOk6sXmno1Eynrp~~MBuFq0Ko6~jsc2E2CRVYXDhGHEMdt-j6JUz5D7S2RIVzDRqQyAZLKJ7OdQDmI31przzmne1vOqqqLC~1xUumZVIvF~yOeJUGNjJ1Vx0J8i2BQIusn1pQJ6UCB~ZtZZLQtEb8EPVCfpeRi2ri1M5CyOuxN0V5ekmPHrYIBNevuTCRC26NP7ZS5VDgx1~NaC3A-CzJAE6f1QXi0wMI9aywNG5KGzOPifcsih8eyGyytvgLtrZtV7ykzYpPCS-rDfITncpn5hliPUAAAA.i2p/pub/bt/announce.php=http://de-ebook-archiv.i2p/pub/bt/"
@ -705,9 +708,9 @@ public class SnarkManager implements Snark.CompleteListener {
// , "mastertracker", "http://VzXD~stRKbL3MOmeTn1iaCQ0CFyTmuFHiKYyo0Rd~dFPZFCYH-22rT8JD7i-C2xzYFa4jT5U2aqHzHI-Jre4HL3Ri5hFtZrLk2ax3ji7Qfb6qPnuYkuiF2E2UDmKUOppI8d9Ye7tjdhQVCy0izn55tBaB-U7UWdcvSK2i85sauyw3G0Gfads1Rvy5-CAe2paqyYATcDmGjpUNLoxbfv9KH1KmwRTNH6k1v4PyWYYnhbT39WfKMbBjSxVQRdi19cyJrULSWhjxaQfJHeWx5Z8Ev4bSPByBeQBFl2~4vqy0S5RypINsRSa3MZdbiAAyn5tr5slWR6QdoqY3qBQgBJFZppy-3iWkFqqKgSxCPundF8gdDLC5ddizl~KYcYKl42y9SGFHIukH-TZs8~em0~iahzsqWVRks3zRG~tlBcX2U3M2~OJs~C33-NKhyfZT7-XFBREvb8Szmd~p66jDxrwOnKaku-G6DyoQipJqIz4VHmY9-y5T8RrUcJcM-5lVoMpAAAA.i2p/announce.php=http://tracker.mastertracker.i2p/" // , "mastertracker", "http://VzXD~stRKbL3MOmeTn1iaCQ0CFyTmuFHiKYyo0Rd~dFPZFCYH-22rT8JD7i-C2xzYFa4jT5U2aqHzHI-Jre4HL3Ri5hFtZrLk2ax3ji7Qfb6qPnuYkuiF2E2UDmKUOppI8d9Ye7tjdhQVCy0izn55tBaB-U7UWdcvSK2i85sauyw3G0Gfads1Rvy5-CAe2paqyYATcDmGjpUNLoxbfv9KH1KmwRTNH6k1v4PyWYYnhbT39WfKMbBjSxVQRdi19cyJrULSWhjxaQfJHeWx5Z8Ev4bSPByBeQBFl2~4vqy0S5RypINsRSa3MZdbiAAyn5tr5slWR6QdoqY3qBQgBJFZppy-3iWkFqqKgSxCPundF8gdDLC5ddizl~KYcYKl42y9SGFHIukH-TZs8~em0~iahzsqWVRks3zRG~tlBcX2U3M2~OJs~C33-NKhyfZT7-XFBREvb8Szmd~p66jDxrwOnKaku-G6DyoQipJqIz4VHmY9-y5T8RrUcJcM-5lVoMpAAAA.i2p/announce.php=http://tracker.mastertracker.i2p/"
// , "Galen", "http://5jpwQMI5FT303YwKa5Rd38PYSX04pbIKgTaKQsWbqoWjIfoancFdWCShXHLI5G5ofOb0Xu11vl2VEMyPsg1jUFYSVnu4-VfMe3y4TKTR6DTpetWrnmEK6m2UXh91J5DZJAKlgmO7UdsFlBkQfR2rY853-DfbJtQIFl91tbsmjcA5CGQi4VxMFyIkBzv-pCsuLQiZqOwWasTlnzey8GcDAPG1LDcvfflGV~6F5no9mnuisZPteZKlrv~~TDoXTj74QjByWc4EOYlwqK8sbU9aOvz~s31XzErbPTfwiawiaZ0RUI-IDrKgyvmj0neuFTWgjRGVTH8bz7cBZIc3viy6ioD-eMQOrXaQL0TCWZUelRwHRvgdPiQrxdYQs7ixkajeHzxi-Pq0EMm5Vbh3j3Q9kfUFW3JjFDA-MLB4g6XnjCbM5J1rC0oOBDCIEfhQkszru5cyLjHiZ5yeA0VThgu~c7xKHybv~OMXION7V8pBKOgET7ZgAkw1xgYe3Kkyq5syAAAA.i2p/tr/announce.php=http://galen.i2p/tr/" // , "Galen", "http://5jpwQMI5FT303YwKa5Rd38PYSX04pbIKgTaKQsWbqoWjIfoancFdWCShXHLI5G5ofOb0Xu11vl2VEMyPsg1jUFYSVnu4-VfMe3y4TKTR6DTpetWrnmEK6m2UXh91J5DZJAKlgmO7UdsFlBkQfR2rY853-DfbJtQIFl91tbsmjcA5CGQi4VxMFyIkBzv-pCsuLQiZqOwWasTlnzey8GcDAPG1LDcvfflGV~6F5no9mnuisZPteZKlrv~~TDoXTj74QjByWc4EOYlwqK8sbU9aOvz~s31XzErbPTfwiawiaZ0RUI-IDrKgyvmj0neuFTWgjRGVTH8bz7cBZIc3viy6ioD-eMQOrXaQL0TCWZUelRwHRvgdPiQrxdYQs7ixkajeHzxi-Pq0EMm5Vbh3j3Q9kfUFW3JjFDA-MLB4g6XnjCbM5J1rC0oOBDCIEfhQkszru5cyLjHiZ5yeA0VThgu~c7xKHybv~OMXION7V8pBKOgET7ZgAkw1xgYe3Kkyq5syAAAA.i2p/tr/announce.php=http://galen.i2p/tr/"
"POSTMAN", "http://tracker2.postman.i2p/announce.php=http://tracker2.postman.i2p/" "POSTMAN", "http://tracker2.postman.i2p/announce.php=http://tracker2.postman.i2p/"
,"WELTERDE", "http://BGKmlDOoH3RzFbPRfRpZV2FjpVj8~3moFftw5-dZfDf2070TOe8Tf2~DAVeaM6ZRLdmFEt~9wyFL8YMLMoLoiwGEH6IGW6rc45tstN68KsBDWZqkTohV1q9XFgK9JnCwE~Oi89xLBHsLMTHOabowWM6dkC8nI6QqJC2JODqLPIRfOVrDdkjLwtCrsckzLybNdFmgfoqF05UITDyczPsFVaHtpF1sRggOVmdvCM66otyonlzNcJbn59PA-R808vUrCPMGU~O9Wys0i-NoqtIbtWfOKnjCRFMNw5ex4n9m5Sxm9e20UkpKG6qzEuvKZWi8vTLe1NW~CBrj~vG7I3Ok4wybUFflBFOaBabxYJLlx4xTE1zJIVxlsekmAjckB4v-cQwulFeikR4LxPQ6mCQknW2HZ4JQIq6hL9AMabxjOlYnzh7kjOfRGkck8YgeozcyTvcDUcUsOuSTk06L4kdrv8h2Cozjbloi5zl6KTbj5ZTciKCxi73Pn9grICn-HQqEAAAA.i2p/a=http://tracker.welterde.i2p/stats?mode=top5" ,"WELTERDE", "http://tracker.welterde.i2p/a=http://tracker.welterde.i2p/stats?mode=top5"
, "CRSTRACK", "http://b4G9sCdtfvccMAXh~SaZrPqVQNyGQbhbYMbw6supq2XGzbjU4NcOmjFI0vxQ8w1L05twmkOvg5QERcX6Mi8NQrWnR0stLExu2LucUXg1aYjnggxIR8TIOGygZVIMV3STKH4UQXD--wz0BUrqaLxPhrm2Eh9Hwc8TdB6Na4ShQUq5Xm8D4elzNUVdpM~RtChEyJWuQvoGAHY3ppX-EJJLkiSr1t77neS4Lc-KofMVmgI9a2tSSpNAagBiNI6Ak9L1T0F9uxeDfEG9bBSQPNMOSUbAoEcNxtt7xOW~cNOAyMyGydwPMnrQ5kIYPY8Pd3XudEko970vE0D6gO19yoBMJpKx6Dh50DGgybLQ9CpRaynh2zPULTHxm8rneOGRcQo8D3mE7FQ92m54~SvfjXjD2TwAVGI~ae~n9HDxt8uxOecAAvjjJ3TD4XM63Q9TmB38RmGNzNLDBQMEmJFpqQU8YeuhnS54IVdUoVQFqui5SfDeLXlSkh4vYoMU66pvBfWbAAAA.i2p/tracker/announce.php=http://crstrack.i2p/tracker/" , "CRSTRACK", "http://b4G9sCdtfvccMAXh~SaZrPqVQNyGQbhbYMbw6supq2XGzbjU4NcOmjFI0vxQ8w1L05twmkOvg5QERcX6Mi8NQrWnR0stLExu2LucUXg1aYjnggxIR8TIOGygZVIMV3STKH4UQXD--wz0BUrqaLxPhrm2Eh9Hwc8TdB6Na4ShQUq5Xm8D4elzNUVdpM~RtChEyJWuQvoGAHY3ppX-EJJLkiSr1t77neS4Lc-KofMVmgI9a2tSSpNAagBiNI6Ak9L1T0F9uxeDfEG9bBSQPNMOSUbAoEcNxtt7xOW~cNOAyMyGydwPMnrQ5kIYPY8Pd3XudEko970vE0D6gO19yoBMJpKx6Dh50DGgybLQ9CpRaynh2zPULTHxm8rneOGRcQo8D3mE7FQ92m54~SvfjXjD2TwAVGI~ae~n9HDxt8uxOecAAvjjJ3TD4XM63Q9TmB38RmGNzNLDBQMEmJFpqQU8YeuhnS54IVdUoVQFqui5SfDeLXlSkh4vYoMU66pvBfWbAAAA.i2p/tracker/announce.php=http://crstrack.i2p/tracker/"
, "ThePirateBay", "http://tracker.thepiratebay.i2p/announce=http://thepiratebay.i2p/"
}; };
/** comma delimited list of name=announceURL=baseURL for the trackers to be displayed */ /** comma delimited list of name=announceURL=baseURL for the trackers to be displayed */

View File

@ -505,6 +505,7 @@ public class I2PSnarkServlet extends HttpServlet {
// temporarily hardcoded for postman* and anonymity, requires bytemonsoon patch for lookup by info_hash // temporarily hardcoded for postman* and anonymity, requires bytemonsoon patch for lookup by info_hash
String announce = snark.meta.getAnnounce(); String announce = snark.meta.getAnnounce();
if (announce.startsWith("http://YRgrgTLG") || announce.startsWith("http://8EoJZIKr") || if (announce.startsWith("http://YRgrgTLG") || announce.startsWith("http://8EoJZIKr") ||
announce.startsWith("http://4svjpPox") || announce.startsWith("http://tracker.thepiratebay.i2p/") ||
announce.startsWith("http://lnQ6yoBT") || announce.startsWith("http://tracker2.postman.i2p/")) { announce.startsWith("http://lnQ6yoBT") || announce.startsWith("http://tracker2.postman.i2p/")) {
Map trackers = _manager.getTrackers(); Map trackers = _manager.getTrackers();
for (Iterator iter = trackers.entrySet().iterator(); iter.hasNext(); ) { for (Iterator iter = trackers.entrySet().iterator(); iter.hasNext(); ) {
@ -512,7 +513,8 @@ public class I2PSnarkServlet extends HttpServlet {
String name = (String)entry.getKey(); String name = (String)entry.getKey();
String baseURL = (String)entry.getValue(); String baseURL = (String)entry.getValue();
if (!(baseURL.startsWith(announce) || // vvv hack for non-b64 announce in list vvv if (!(baseURL.startsWith(announce) || // vvv hack for non-b64 announce in list vvv
(announce.startsWith("http://lnQ6yoBT") && baseURL.startsWith("http://tracker2.postman.i2p/")))) (announce.startsWith("http://lnQ6yoBT") && baseURL.startsWith("http://tracker2.postman.i2p/")) ||
(announce.startsWith("http://4svjpPox") && baseURL.startsWith("http://thepiratebay.i2p/"))))
continue; continue;
int e = baseURL.indexOf('='); int e = baseURL.indexOf('=');
if (e < 0) if (e < 0)

View File

@ -1,4 +1,4 @@
package net.i2p.router.peermanager; package net.i2p.router.web;
import java.io.IOException; import java.io.IOException;
import java.io.Writer; import java.io.Writer;
@ -12,6 +12,9 @@ import net.i2p.data.DataHelper;
import net.i2p.data.Hash; import net.i2p.data.Hash;
import net.i2p.data.RouterInfo; import net.i2p.data.RouterInfo;
import net.i2p.router.RouterContext; import net.i2p.router.RouterContext;
import net.i2p.router.peermanager.DBHistory;
import net.i2p.router.peermanager.PeerProfile;
import net.i2p.router.peermanager.ProfileOrganizer;
import net.i2p.stat.Rate; import net.i2p.stat.Rate;
import net.i2p.stat.RateStat; import net.i2p.stat.RateStat;

View File

@ -1,30 +1,29 @@
package net.i2p.router.web; package net.i2p.router.web;
import java.io.ByteArrayOutputStream;
import java.io.IOException; import java.io.IOException;
import java.io.OutputStreamWriter;
public class ProfilesHelper extends HelperBase { public class ProfilesHelper extends HelperBase {
public ProfilesHelper() {} public ProfilesHelper() {}
/** @return empty string, writes directly to _out */
public String getProfileSummary() { public String getProfileSummary() {
ByteArrayOutputStream baos = new ByteArrayOutputStream(16*1024);
try { try {
_context.profileOrganizer().renderStatusHTML(new OutputStreamWriter(baos)); ProfileOrganizerRenderer rend = new ProfileOrganizerRenderer(_context.profileOrganizer(), _context);
rend.renderStatusHTML(_out);
} catch (IOException ioe) { } catch (IOException ioe) {
ioe.printStackTrace(); ioe.printStackTrace();
} }
return new String(baos.toByteArray()); return "";
} }
/** @return empty string, writes directly to _out */
public String getShitlistSummary() { public String getShitlistSummary() {
ByteArrayOutputStream baos = new ByteArrayOutputStream(4*1024);
try { try {
_context.shitlist().renderStatusHTML(new OutputStreamWriter(baos)); _context.shitlist().renderStatusHTML(_out);
} catch (IOException ioe) { } catch (IOException ioe) {
ioe.printStackTrace(); ioe.printStackTrace();
} }
return new String(baos.toByteArray()); return "";
} }
} }

View File

@ -66,6 +66,7 @@
<a name="shitlist"> </a> <a name="shitlist"> </a>
<jsp:useBean class="net.i2p.router.web.ProfilesHelper" id="profilesHelper" scope="request" /> <jsp:useBean class="net.i2p.router.web.ProfilesHelper" id="profilesHelper" scope="request" />
<jsp:setProperty name="profilesHelper" property="contextId" value="<%=(String)session.getAttribute("i2p.contextId")%>" /> <jsp:setProperty name="profilesHelper" property="contextId" value="<%=(String)session.getAttribute("i2p.contextId")%>" />
<jsp:setProperty name="profilesHelper" property="writer" value="<%=out%>" />
<jsp:getProperty name="profilesHelper" property="shitlistSummary" /> <jsp:getProperty name="profilesHelper" property="shitlistSummary" />
<div class="wideload"> <div class="wideload">
<jsp:getProperty name="peerhelper" property="blocklistSummary" /> <jsp:getProperty name="peerhelper" property="blocklistSummary" />

View File

@ -9,6 +9,7 @@
<div class="main" id="main"><div class="wideload"> <div class="main" id="main"><div class="wideload">
<jsp:useBean class="net.i2p.router.web.ProfilesHelper" id="profilesHelper" scope="request" /> <jsp:useBean class="net.i2p.router.web.ProfilesHelper" id="profilesHelper" scope="request" />
<jsp:setProperty name="profilesHelper" property="contextId" value="<%=(String)session.getAttribute("i2p.contextId")%>" /> <jsp:setProperty name="profilesHelper" property="contextId" value="<%=(String)session.getAttribute("i2p.contextId")%>" />
<jsp:setProperty name="profilesHelper" property="writer" value="<%=out%>" />
<jsp:getProperty name="profilesHelper" property="profileSummary" /> <jsp:getProperty name="profilesHelper" property="profileSummary" />
<a name="shitlist"> </a> <a name="shitlist"> </a>
<jsp:getProperty name="profilesHelper" property="shitlistSummary" /> <jsp:getProperty name="profilesHelper" property="shitlistSummary" />

View File

@ -1,3 +1,17 @@
2009-10-16 zzz
* Crypto: Two more test classes out of the lib
* FloodfillMonitor: Slow down the volunteers again
* oldconsole.jsp: Remove almost all of it, add lines for tino
* MessageHistory: Cleanups
* NetDb: Rework part 1 of N:
- Flood only to those closest to the key
- Java 5 fixups
* ProfileOrganizerRenderer:
- Move to routerconsole
- Write directly to Writer for speed
* Router: Add router.hideFloodfillParticipant option for testing
* StatisticsManager: Cleanup after release
* 2009-10-12 0.7.7 released * 2009-10-12 0.7.7 released
2009-10-11 zzz 2009-10-11 zzz

View File

@ -319,3 +319,5 @@ bob.i2p=5uAI6ENnNP8acDkAtYqBjLa-gFqM~uERNZdJBKtRwUvDkuXvIM66pYfYL3-ugTmoR-SIveDl
sponge.i2p=VG4Bd~q1RA3BdoF3z5fSR7p0xe1CTVgDMWVGyFchA9Wm2iXUkIR35G45XE31Uc9~IOt-ktNLL2~TYQZ13Vl8udosngDn8RJG1NtVASH4khsbgkkoFLWd6UuvuOjQKBFKjaEPJgxOzh0kxolRPPNHhFuuAGzNLKvz~LI2MTf0P6nwmRg1lBoRIUpSVocEHY4X306nT2VtY07FixbJcPCU~EeRin24yNoiZop-C3Wi1SGwJJK-NS7mnkNzd8ngDJXDJtR-wLP1vNyyBY6NySgqPiIhENHoVeXd5krlR42HORCxEDb4jhoqlbyJq-PrhTJ5HdH4-~gEq09B~~NIHzy7X02XgmBXhTYRtl6HbLMXs6SI5fq9OFgVp5YZWYUklJjMDI7jOrGrEZGSHhnJK9kT6D3CqVIM0cYEhe4ttmTegbZvC~J6DrRTIAX422qRQJBPsTUnv4iFyuJE-8SodP6ikTjRH21Qx73SxqOvmrOiu7Bsp0lvVDa84aoaYLdiGv87AAAA sponge.i2p=VG4Bd~q1RA3BdoF3z5fSR7p0xe1CTVgDMWVGyFchA9Wm2iXUkIR35G45XE31Uc9~IOt-ktNLL2~TYQZ13Vl8udosngDn8RJG1NtVASH4khsbgkkoFLWd6UuvuOjQKBFKjaEPJgxOzh0kxolRPPNHhFuuAGzNLKvz~LI2MTf0P6nwmRg1lBoRIUpSVocEHY4X306nT2VtY07FixbJcPCU~EeRin24yNoiZop-C3Wi1SGwJJK-NS7mnkNzd8ngDJXDJtR-wLP1vNyyBY6NySgqPiIhENHoVeXd5krlR42HORCxEDb4jhoqlbyJq-PrhTJ5HdH4-~gEq09B~~NIHzy7X02XgmBXhTYRtl6HbLMXs6SI5fq9OFgVp5YZWYUklJjMDI7jOrGrEZGSHhnJK9kT6D3CqVIM0cYEhe4ttmTegbZvC~J6DrRTIAX422qRQJBPsTUnv4iFyuJE-8SodP6ikTjRH21Qx73SxqOvmrOiu7Bsp0lvVDa84aoaYLdiGv87AAAA
docs.i2p2.i2p=BhSHFwXW7zGTLRVTRuTfgxWC1PxKGDyY2OYpS0IrDnYhQklWVFxHz4xVpw8UXo8LTFXAnAjknrcYdLt6DfHcO-ZkFPo5UbOIfywSsHoet4J6BQ1MOt1MLTAejks4Rkj3~sfK2fJHHvHYTjm1v5~f8c13ZH5fPfQ3A71RRCyiYaeO5-VxC6rqvW~z0dNO~-jakjwD7tHtzQL2vQTqarYT859yUiHmLJ~yw5jXfxNBhlxIxaXg0Nat9S5N2W4Eqemy-UYtSGOM4IUGKoM902JxhVpz~O1~iB5H211E3x-o8dKTt9Yz2G5Qcp1kRB0NCO2Noivsxjnfv~64zoUVbPepyJFQKenRtX844HgOESNcUp~FoVzI~QJne5irJDMLK1dNsua3L1kz0MA-2Aev8byWe4TIXeZCuDpYi4bRK6OPKDETwJG8edw7CFtsQaFI-2wGMFu8GDH7pUL8~1qyDjjFv5c~q1MFhty9q8LRUGHHgWP47u9n8OX4zcS4P1~5z2M3AAAA docs.i2p2.i2p=BhSHFwXW7zGTLRVTRuTfgxWC1PxKGDyY2OYpS0IrDnYhQklWVFxHz4xVpw8UXo8LTFXAnAjknrcYdLt6DfHcO-ZkFPo5UbOIfywSsHoet4J6BQ1MOt1MLTAejks4Rkj3~sfK2fJHHvHYTjm1v5~f8c13ZH5fPfQ3A71RRCyiYaeO5-VxC6rqvW~z0dNO~-jakjwD7tHtzQL2vQTqarYT859yUiHmLJ~yw5jXfxNBhlxIxaXg0Nat9S5N2W4Eqemy-UYtSGOM4IUGKoM902JxhVpz~O1~iB5H211E3x-o8dKTt9Yz2G5Qcp1kRB0NCO2Noivsxjnfv~64zoUVbPepyJFQKenRtX844HgOESNcUp~FoVzI~QJne5irJDMLK1dNsua3L1kz0MA-2Aev8byWe4TIXeZCuDpYi4bRK6OPKDETwJG8edw7CFtsQaFI-2wGMFu8GDH7pUL8~1qyDjjFv5c~q1MFhty9q8LRUGHHgWP47u9n8OX4zcS4P1~5z2M3AAAA
paste.i2p2.i2p=PbHXL5PXan7siJiFcUAV~VC0JCLxgnoOnZFjyvJ0dbYlQ3fi1K6SD961pjQ51OSeTnbe5iGRzbY2X0~pG4k~hexau4NizxprAdgdiC-4J3-xpVRjZ4IxuMoDXp-V8Nhv8pLCQcxiEXbWft2v7zLvkp2y6uqH7kab8FXL~z568rMMH0DDs8imwAawasyGtLLo77X8n-C0K~7orcWDVZicWABJ-zky1Zlllx~Y~S8RHWyN4dueP6wkH484b81xNbbt3P-HzE3TcKAvUcSV1Bq4J5UNafQYU7DhV7roUtw4HuJYoxiXnlXVeC-uTCGF~bPrjrB-~Yn0KyObmXs5yvAcKHIS2tgmlsP9nahyn1ZOrlZc0L3DEsv4rkfQyzHVBxcCzMUOchWehE09GLy3bviWZ43lB7kU8kRaja7G4xLrD-CXNXq6q7WNYXrqX7EmtsvCo8VDcFn2ODyLb3eyDe~CkO7ES7mv3u8jJxJRQEcjj71pvu7bMzSMh-xN08X6vx9AAAAA paste.i2p2.i2p=PbHXL5PXan7siJiFcUAV~VC0JCLxgnoOnZFjyvJ0dbYlQ3fi1K6SD961pjQ51OSeTnbe5iGRzbY2X0~pG4k~hexau4NizxprAdgdiC-4J3-xpVRjZ4IxuMoDXp-V8Nhv8pLCQcxiEXbWft2v7zLvkp2y6uqH7kab8FXL~z568rMMH0DDs8imwAawasyGtLLo77X8n-C0K~7orcWDVZicWABJ-zky1Zlllx~Y~S8RHWyN4dueP6wkH484b81xNbbt3P-HzE3TcKAvUcSV1Bq4J5UNafQYU7DhV7roUtw4HuJYoxiXnlXVeC-uTCGF~bPrjrB-~Yn0KyObmXs5yvAcKHIS2tgmlsP9nahyn1ZOrlZc0L3DEsv4rkfQyzHVBxcCzMUOchWehE09GLy3bviWZ43lB7kU8kRaja7G4xLrD-CXNXq6q7WNYXrqX7EmtsvCo8VDcFn2ODyLb3eyDe~CkO7ES7mv3u8jJxJRQEcjj71pvu7bMzSMh-xN08X6vx9AAAAA
thepiratebay.i2p=9Sng0F-cAyGgPyr1xenqY87Uf2AG~PlTXzUJRQnr7gpYFPUfRlH3OfWOVFat67jUl37ZzWBOSYC7-6YqFzPQV5u~DrTGaoImfa7BsRbnBlPXbSNIaP59C6Vp1RCbhtTpxQ4PvLLn9xzUKWoPkrQ222TCSxPR6-j2ChSuuCIA7bP7EP75BobL6n9hMmekzB4FbmBhC64Kri72Uhv~rMDdMZaDD9cD9-BgnZkyrI5jRtSuOUVnTexKMQ0UiYHanSDlBvNwLRMGdb0AsckOdXHrleKrwPnW4YTp0q89dPGP6fad4sVxgvxLHF6NuoWXGbnD0sYuv5qkegjBzioHOjxI~n52ObdeELVhs~peeiXpRavZcwlu1HzwNKfU8lJrpnLSoQsCuqd4OBFMmjvo3HhovLsTeUAo1W2O1F8gcPeOj3tD0ihInncMIbEUTI7kdbkBTsoMY8~73jKgQYC0c~hiEUb1tG4NLcfdxgAlWF5q9cJPDHFh9jtzDvq63OntBQ5OAAAA
tracker.thepiratebay.i2p=4svjpPox0vc527neBdWyfLiVulEeHtzQrC6IDDB2~rPwnZYWm3xsyrDYTa9gu5~1QrFitr5RMCcj34tzZZCKIg~INFNhi7Zk7UwsOCHtedS0RpRjDi2O3q~T~k8D4P39Rz0So91D624lofDV48itdkX8B3dNUHE0Qq5hCGjb2UVxLUhKh8DYOUAqYPoLaF1RpQx5DT~r-Hf57vA9bW3Q31xYH~Ys6AxCZ8~EmMqdgm0ZMQ57oWldHgkSbtQsoiBn2igJ24GDUDUvBsRVLt7He1nKg1ei2JvqQajKN31cQeS5fjqiGdUTkXjc1FftKB8HC9CbnsMJjPEFT6gvvtSpxULvSQGSJyD6OIzebXvQIYANAapEk1VP1OSIJlteOIwGDXkGj9ZyLpT7~RpUpk92v9L53Zjof~WjJmGqqWWsL~yypl0~nMUw5MLaKv5AJywDnFIJDR-GlkQQj5fECeefge30Y92CHwxgImwj3v7~DwXuU9d5u6KyzJSuByOGRvwVAAAA

View File

@ -23,7 +23,7 @@ number of session keys transferred, by improving the
methods of dropping messages during overload, and by reducing methods of dropping messages during overload, and by reducing
drops by high-bandwidth routers. drops by high-bandwidth routers.
There is a new <a href="/configupdate.jsp">unsigned update option</a> There is a new <a href="/configupdate.jsp">unsigned update option</a>
for those of you that would like automatic udpates to bleeding-edge development versions. for those of you that would like automatic updates to bleeding-edge development versions.
</p><p> </p><p>
The release also contains several changes to reduce memory and CPU usage, The release also contains several changes to reduce memory and CPU usage,
and a large assortment of bug fixes. and a large assortment of bug fixes.

View File

@ -463,14 +463,15 @@ public class Router {
ri.addCapability(CAPABILITY_BW256); ri.addCapability(CAPABILITY_BW256);
} }
if (FloodfillNetworkDatabaseFacade.floodfillEnabled(_context)) // if prop set to true, don't tell people we are ff even if we are
if (FloodfillNetworkDatabaseFacade.floodfillEnabled(_context) &&
!Boolean.valueOf(_context.getProperty("router.hideFloodfillParticipant")).booleanValue())
ri.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL); ri.addCapability(FloodfillNetworkDatabaseFacade.CAPACITY_FLOODFILL);
if("true".equalsIgnoreCase(_context.getProperty(Router.PROP_HIDDEN, "false"))) if(Boolean.valueOf(_context.getProperty(PROP_HIDDEN)).booleanValue())
ri.addCapability(RouterInfo.CAPABILITY_HIDDEN); ri.addCapability(RouterInfo.CAPABILITY_HIDDEN);
String forceUnreachable = _context.getProperty(PROP_FORCE_UNREACHABLE); if (Boolean.valueOf(_context.getProperty(PROP_FORCE_UNREACHABLE)).booleanValue()) {
if ( (forceUnreachable != null) && ("true".equalsIgnoreCase(forceUnreachable)) ) {
ri.addCapability(CAPABILITY_UNREACHABLE); ri.addCapability(CAPABILITY_UNREACHABLE);
return; return;
} }
@ -582,7 +583,13 @@ public class Router {
//_context.inNetMessagePool().registerHandlerJobBuilder(TunnelMessage.MESSAGE_TYPE, new TunnelMessageHandler(_context)); //_context.inNetMessagePool().registerHandlerJobBuilder(TunnelMessage.MESSAGE_TYPE, new TunnelMessageHandler(_context));
} }
/**
* this is for oldconsole.jsp, pretty much unused except as a way to get memory info,
* so let's comment out the rest, it is available elsewhere, and we don't really
* want to spend a minute rendering a multi-megabyte page in memory.
*/
public void renderStatusHTML(Writer out) throws IOException { public void renderStatusHTML(Writer out) throws IOException {
/****************
out.write("<h1>Router console</h1>\n" + out.write("<h1>Router console</h1>\n" +
"<i><a href=\"/oldconsole.jsp\">console</a> | <a href=\"/oldstats.jsp\">stats</a></i><br>\n" + "<i><a href=\"/oldconsole.jsp\">console</a> | <a href=\"/oldstats.jsp\">stats</a></i><br>\n" +
"<form action=\"/oldconsole.jsp\">" + "<form action=\"/oldconsole.jsp\">" +
@ -599,21 +606,25 @@ public class Router {
"<option value=\"/oldconsole.jsp#logs\">Log messages</option>\n" + "<option value=\"/oldconsole.jsp#logs\">Log messages</option>\n" +
"</select> <input type=\"submit\" value=\"GO\" /> </form>" + "</select> <input type=\"submit\" value=\"GO\" /> </form>" +
"<hr>\n"); "<hr>\n");
**************/
StringBuilder buf = new StringBuilder(32*1024); StringBuilder buf = new StringBuilder(4*1024);
// Please don't change the text or formatting, tino matches it in his scripts
if ( (_routerInfo != null) && (_routerInfo.getIdentity() != null) ) if ( (_routerInfo != null) && (_routerInfo.getIdentity() != null) )
buf.append("<b>Router: </b> ").append(_routerInfo.getIdentity().getHash().toBase64()).append("<br>\n"); buf.append("<b>Router: </b> ").append(_routerInfo.getIdentity().getHash().toBase64()).append("<br>\n");
buf.append("<b>As of: </b> ").append(new Date(_context.clock().now())).append(" (uptime: ").append(DataHelper.formatDuration(getUptime())).append(") <br>\n"); buf.append("<b>As of: </b> ").append(new Date(_context.clock().now())).append("<br>\n");
buf.append("<b>RouterUptime: </b> " ).append(DataHelper.formatDuration(getUptime())).append(" <br>\n");
buf.append("<b>Started on: </b> ").append(new Date(getWhenStarted())).append("<br>\n"); buf.append("<b>Started on: </b> ").append(new Date(getWhenStarted())).append("<br>\n");
buf.append("<b>Clock offset: </b> ").append(_context.clock().getOffset()).append("ms (OS time: ").append(new Date(_context.clock().now() - _context.clock().getOffset())).append(")<br>\n"); buf.append("<b>Clock offset: </b> ").append(_context.clock().getOffset()).append("ms (OS time: ").append(new Date(_context.clock().now() - _context.clock().getOffset())).append(")<br>\n");
buf.append("<b>RouterVersion:</b> ").append(RouterVersion.FULL_VERSION).append(" / SDK: ").append(CoreVersion.VERSION).append("<br>\n");
long tot = Runtime.getRuntime().totalMemory()/1024; long tot = Runtime.getRuntime().totalMemory()/1024;
long free = Runtime.getRuntime().freeMemory()/1024; long free = Runtime.getRuntime().freeMemory()/1024;
buf.append("<b>Memory:</b> In use: ").append((tot-free)).append("KB Free: ").append(free).append("KB <br>\n"); buf.append("<b>Memory:</b> In use: ").append((tot-free)).append("KB Free: ").append(free).append("KB <br>\n");
buf.append("<b>Version:</b> Router: ").append(RouterVersion.VERSION).append(" / SDK: ").append(CoreVersion.VERSION).append("<br>\n");
if (_higherVersionSeen) if (_higherVersionSeen)
buf.append("<b><font color=\"red\">HIGHER VERSION SEEN</font><b> - please <a href=\"http://www.i2p.net/\">check</a> to see if there is a new release out<br>\n"); buf.append("<b><font color=\"red\">HIGHER VERSION SEEN</font><b> - please <a href=\"http://www.i2p.net/\">check</a> to see if there is a new release out<br>\n");
/*********
buf.append("<hr><a name=\"bandwidth\"> </a><h2>Bandwidth</h2>\n"); buf.append("<hr><a name=\"bandwidth\"> </a><h2>Bandwidth</h2>\n");
long sent = _context.bandwidthLimiter().getTotalAllocatedOutboundBytes(); long sent = _context.bandwidthLimiter().getTotalAllocatedOutboundBytes();
long received = _context.bandwidthLimiter().getTotalAllocatedInboundBytes(); long received = _context.bandwidthLimiter().getTotalAllocatedInboundBytes();
@ -768,6 +779,7 @@ public class Router {
buf.append("</pre></td></tr>\n"); buf.append("</pre></td></tr>\n");
} }
buf.append("</table>\n"); buf.append("</table>\n");
***********/
out.write(buf.toString()); out.write(buf.toString());
out.flush(); out.flush();
} }

View File

@ -122,8 +122,7 @@ public class StatisticsManager implements Service {
//includeRate("jobQueue.jobRunSlow", stats, new long[] { 10*60*1000l, 60*60*1000l }); //includeRate("jobQueue.jobRunSlow", stats, new long[] { 10*60*1000l, 60*60*1000l });
//includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*60*1000 }); //includeRate("crypto.elGamal.encrypt", stats, new long[] { 60*60*1000 });
// total event count can be used to track uptime // total event count can be used to track uptime
boolean hideTotals = ! RouterVersion.VERSION.equals("0.7.6"); includeRate("tunnel.participatingTunnels", stats, new long[] { 60*60*1000 }, true);
includeRate("tunnel.participatingTunnels", stats, new long[] { 60*60*1000 }, hideTotals);
//includeRate("tunnel.testSuccessTime", stats, new long[] { 10*60*1000l }); //includeRate("tunnel.testSuccessTime", stats, new long[] { 10*60*1000l });
//includeRate("client.sendAckTime", stats, new long[] { 60*60*1000 }, true); //includeRate("client.sendAckTime", stats, new long[] { 60*60*1000 }, true);
//includeRate("udp.sendConfirmTime", stats, new long[] { 10*60*1000 }); //includeRate("udp.sendConfirmTime", stats, new long[] { 10*60*1000 });

View File

@ -50,7 +50,7 @@ class FloodfillMonitorJob extends JobImpl {
// there's a lot of eligible non-floodfills, keep them from all jumping in at once // there's a lot of eligible non-floodfills, keep them from all jumping in at once
// To do: somehow assess the size of the network to make this adaptive? // To do: somehow assess the size of the network to make this adaptive?
if (!ff) if (!ff)
delay *= 3; delay *= 7;
requeue(delay); requeue(delay);
} }

View File

@ -94,9 +94,22 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
} }
} }
private static final int MAX_TO_FLOOD = 9;
/**
* Send to a subset of all floodfill peers.
* We do this to implement Kademlia within the floodfills, i.e.
* we flood to those closest to the key.
*/
public void flood(DataStructure ds) { public void flood(DataStructure ds) {
Hash key;
if (ds instanceof LeaseSet)
key = ((LeaseSet)ds).getDestination().calculateHash();
else
key = ((RouterInfo)ds).getIdentity().calculateHash();
Hash rkey = _context.routingKeyGenerator().getRoutingKey(key);
FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector(); FloodfillPeerSelector sel = (FloodfillPeerSelector)getPeerSelector();
List peers = sel.selectFloodfillParticipants(getKBuckets()); List peers = sel.selectFloodfillParticipants(rkey, MAX_TO_FLOOD, getKBuckets());
int flooded = 0; int flooded = 0;
for (int i = 0; i < peers.size(); i++) { for (int i = 0; i < peers.size(); i++) {
Hash peer = (Hash)peers.get(i); Hash peer = (Hash)peers.get(i);
@ -107,12 +120,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
continue; continue;
DatabaseStoreMessage msg = new DatabaseStoreMessage(_context); DatabaseStoreMessage msg = new DatabaseStoreMessage(_context);
if (ds instanceof LeaseSet) { if (ds instanceof LeaseSet) {
msg.setKey(((LeaseSet)ds).getDestination().calculateHash());
msg.setLeaseSet((LeaseSet)ds); msg.setLeaseSet((LeaseSet)ds);
} else { } else {
msg.setKey(((RouterInfo)ds).getIdentity().calculateHash());
msg.setRouterInfo((RouterInfo)ds); msg.setRouterInfo((RouterInfo)ds);
} }
msg.setKey(key);
msg.setReplyGateway(null); msg.setReplyGateway(null);
msg.setReplyToken(0); msg.setReplyToken(0);
msg.setReplyTunnel(null); msg.setReplyTunnel(null);
@ -125,11 +137,11 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
_context.commSystem().processMessage(m); _context.commSystem().processMessage(m);
flooded++; flooded++;
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info("Flooding the entry for " + msg.getKey().toBase64() + " to " + peer.toBase64()); _log.info("Flooding the entry for " + key.toBase64() + " to " + peer.toBase64());
} }
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info("Flooded the to " + flooded + " peers"); _log.info("Flooded the data to " + flooded + " of " + peers.size() + " peers");
} }
private static final int FLOOD_PRIORITY = 200; private static final int FLOOD_PRIORITY = 200;

View File

@ -32,16 +32,16 @@ class FloodfillPeerSelector extends PeerSelector {
* @return List of Hash for the peers selected * @return List of Hash for the peers selected
*/ */
@Override @Override
public List selectMostReliablePeers(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) { public List<Hash> selectMostReliablePeers(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, true); return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, true);
} }
@Override @Override
public List selectNearestExplicitThin(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) { public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {
return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, false); return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets, false);
} }
public List selectNearestExplicitThin(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets, boolean preferConnected) { public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets, boolean preferConnected) {
if (peersToIgnore == null) if (peersToIgnore == null)
peersToIgnore = new HashSet(1); peersToIgnore = new HashSet(1);
peersToIgnore.add(_context.routerHash()); peersToIgnore.add(_context.routerHash());
@ -56,30 +56,55 @@ class FloodfillPeerSelector extends PeerSelector {
return rv; return rv;
} }
/** Returned list will not include our own hash */ /**
public List selectFloodfillParticipants(KBucketSet kbuckets) { * @return all floodfills not shitlisted forever. list will not include our own hash
*
*/
public List<Hash> selectFloodfillParticipants(KBucketSet kbuckets) {
if (kbuckets == null) return new ArrayList(); if (kbuckets == null) return new ArrayList();
FloodfillSelectionCollector matches = new FloodfillSelectionCollector(null, null, 0); FloodfillSelectionCollector matches = new FloodfillSelectionCollector(null, null, 0);
kbuckets.getAll(matches); kbuckets.getAll(matches);
return matches.getFloodfillParticipants(); return matches.getFloodfillParticipants();
} }
/**
* @return all floodfills not shitlisted foreverx
* @param maxNumRouters max to return
* Sorted by closest to the key if > maxNumRouters, otherwise not
*/
public List<Hash> selectFloodfillParticipants(Hash key, int maxNumRouters, KBucketSet kbuckets) {
List<Hash> ffs = selectFloodfillParticipants(kbuckets);
if (ffs.size() <= maxNumRouters)
return ffs; // unsorted
TreeMap<BigInteger, Hash> sorted = new TreeMap();
for (int i = 0; i < ffs.size(); i++) {
Hash h = ffs.get(i);
BigInteger diff = getDistance(key, h);
sorted.put(diff, h);
}
List<Hash> rv = new ArrayList(maxNumRouters);
for (int i = 0; i < maxNumRouters; i++) {
rv.add(sorted.remove(sorted.firstKey()));
}
return rv;
}
private class FloodfillSelectionCollector implements SelectionCollector { private class FloodfillSelectionCollector implements SelectionCollector {
private TreeMap _sorted; private TreeMap<BigInteger, Hash> _sorted;
private List _floodfillMatches; private List<Hash> _floodfillMatches;
private Hash _key; private Hash _key;
private Set _toIgnore; private Set<Hash> _toIgnore;
private int _matches; private int _matches;
private int _wanted; private int _wanted;
public FloodfillSelectionCollector(Hash key, Set toIgnore, int wanted) { public FloodfillSelectionCollector(Hash key, Set<Hash> toIgnore, int wanted) {
_key = key; _key = key;
_sorted = new TreeMap(); _sorted = new TreeMap();
_floodfillMatches = new ArrayList(1); _floodfillMatches = new ArrayList(8);
_toIgnore = toIgnore; _toIgnore = toIgnore;
_matches = 0; _matches = 0;
_wanted = wanted; _wanted = wanted;
} }
public List getFloodfillParticipants() { return _floodfillMatches; } public List<Hash> getFloodfillParticipants() { return _floodfillMatches; }
private static final int EXTRA_MATCHES = 100; private static final int EXTRA_MATCHES = 100;
public void add(Hash entry) { public void add(Hash entry) {
//if (_context.profileOrganizer().isFailing(entry)) //if (_context.profileOrganizer().isFailing(entry))
@ -115,15 +140,15 @@ class FloodfillPeerSelector extends PeerSelector {
_matches++; _matches++;
} }
/** get the first $howMany entries matching */ /** get the first $howMany entries matching */
public List get(int howMany) { public List<Hash> get(int howMany) {
return get(howMany, false); return get(howMany, false);
} }
public List get(int howMany, boolean preferConnected) { public List<Hash> get(int howMany, boolean preferConnected) {
Collections.shuffle(_floodfillMatches, _context.random()); Collections.shuffle(_floodfillMatches, _context.random());
List rv = new ArrayList(howMany); List<Hash> rv = new ArrayList(howMany);
List badff = new ArrayList(howMany); List<Hash> badff = new ArrayList(howMany);
List unconnectedff = new ArrayList(howMany); List<Hash> unconnectedff = new ArrayList(howMany);
int found = 0; int found = 0;
long now = _context.clock().now(); long now = _context.clock().now();
// Only add in "good" floodfills here... // Only add in "good" floodfills here...

View File

@ -17,10 +17,14 @@ import net.i2p.data.RouterInfo;
import net.i2p.router.Job; import net.i2p.router.Job;
import net.i2p.router.RouterContext; import net.i2p.router.RouterContext;
/**
* This extends StoreJob to fire off a FloodfillVerifyStoreJob after success.
*
*/
class FloodfillStoreJob extends StoreJob { class FloodfillStoreJob extends StoreJob {
private FloodfillNetworkDatabaseFacade _facade; private FloodfillNetworkDatabaseFacade _facade;
/** /**
* Create a new search for the routingKey specified * Send a data structure to the floodfills
* *
*/ */
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) { public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs) {
@ -31,7 +35,7 @@ class FloodfillStoreJob extends StoreJob {
* @param toSkip set of peer hashes of people we dont want to send the data to (e.g. we * @param toSkip set of peer hashes of people we dont want to send the data to (e.g. we
* already know they have it). This can be null. * already know they have it). This can be null.
*/ */
public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set toSkip) { public FloodfillStoreJob(RouterContext context, FloodfillNetworkDatabaseFacade facade, Hash key, DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
super(context, facade, key, data, onSuccess, onFailure, timeoutMs, toSkip); super(context, facade, key, data, onSuccess, onFailure, timeoutMs, toSkip);
_facade = facade; _facade = facade;
} }

View File

@ -43,10 +43,9 @@ public class PeerSelector {
* @return ordered list of Hash objects * @return ordered list of Hash objects
*/ */
/* FIXME Exporting non-public type through public API FIXME */ /* FIXME Exporting non-public type through public API FIXME */
public List selectMostReliablePeers(Hash key, int numClosest, Set alreadyChecked, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API public List<Hash> selectMostReliablePeers(Hash key, int numClosest, Set<Hash> alreadyChecked, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API
// get the peers closest to the key // get the peers closest to the key
List nearest = selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets); return selectNearestExplicit(key, numClosest, alreadyChecked, kbuckets);
return nearest;
} }
/** /**
@ -57,10 +56,11 @@ public class PeerSelector {
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined) * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
*/ */
/* FIXME Exporting non-public type through public API FIXME */ /* FIXME Exporting non-public type through public API FIXME */
public List selectNearestExplicit(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API public List<Hash> selectNearestExplicit(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) {// LINT -- Exporting non-public type through public API
if (true) //if (true)
return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets); return selectNearestExplicitThin(key, maxNumRouters, peersToIgnore, kbuckets);
/******
if (peersToIgnore == null) if (peersToIgnore == null)
peersToIgnore = new HashSet(1); peersToIgnore = new HashSet(1);
peersToIgnore.add(_context.routerHash()); peersToIgnore.add(_context.routerHash());
@ -84,6 +84,7 @@ public class PeerSelector {
+ peerHashes + " (not including " + peersToIgnore + ") [allHashes.size = " + peerHashes + " (not including " + peersToIgnore + ") [allHashes.size = "
+ allHashes.size() + "]"); + allHashes.size() + "]");
return peerHashes; return peerHashes;
******/
} }
/** /**
@ -94,7 +95,7 @@ public class PeerSelector {
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined) * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
*/ */
/* FIXME Exporting non-public type through public API FIXME */ /* FIXME Exporting non-public type through public API FIXME */
public List selectNearestExplicitThin(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API public List<Hash> selectNearestExplicitThin(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API
if (peersToIgnore == null) if (peersToIgnore == null)
peersToIgnore = new HashSet(1); peersToIgnore = new HashSet(1);
peersToIgnore.add(_context.routerHash()); peersToIgnore.add(_context.routerHash());
@ -109,11 +110,11 @@ public class PeerSelector {
} }
private class MatchSelectionCollector implements SelectionCollector { private class MatchSelectionCollector implements SelectionCollector {
private TreeMap _sorted; private TreeMap<BigInteger, Hash> _sorted;
private Hash _key; private Hash _key;
private Set _toIgnore; private Set<Hash> _toIgnore;
private int _matches; private int _matches;
public MatchSelectionCollector(Hash key, Set toIgnore) { public MatchSelectionCollector(Hash key, Set<Hash> toIgnore) {
_key = key; _key = key;
_sorted = new TreeMap(); _sorted = new TreeMap();
_toIgnore = toIgnore; _toIgnore = toIgnore;
@ -135,8 +136,8 @@ public class PeerSelector {
_matches++; _matches++;
} }
/** get the first $howMany entries matching */ /** get the first $howMany entries matching */
public List get(int howMany) { public List<Hash> get(int howMany) {
List rv = new ArrayList(howMany); List<Hash> rv = new ArrayList(howMany);
for (int i = 0; i < howMany; i++) { for (int i = 0; i < howMany; i++) {
if (_sorted.size() <= 0) if (_sorted.size() <= 0)
break; break;
@ -151,6 +152,7 @@ public class PeerSelector {
* strip out all of the peers that are failing * strip out all of the peers that are failing
* *
*/ */
/********
private void removeFailingPeers(Set peerHashes) { private void removeFailingPeers(Set peerHashes) {
List failing = null; List failing = null;
for (Iterator iter = peerHashes.iterator(); iter.hasNext(); ) { for (Iterator iter = peerHashes.iterator(); iter.hasNext(); ) {
@ -184,6 +186,7 @@ public class PeerSelector {
if (failing != null) if (failing != null)
peerHashes.removeAll(failing); peerHashes.removeAll(failing);
} }
**********/
public static BigInteger getDistance(Hash targetKey, Hash routerInQuestion) { public static BigInteger getDistance(Hash targetKey, Hash routerInQuestion) {
// plain XOR of the key and router // plain XOR of the key and router
@ -199,7 +202,7 @@ public class PeerSelector {
* @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined) * @return List of Hash for the peers selected, ordered by bucket (but intra bucket order is not defined)
*/ */
/* FIXME Exporting non-public type through public API FIXME */ /* FIXME Exporting non-public type through public API FIXME */
public List selectNearest(Hash key, int maxNumRouters, Set peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API public List<Hash> selectNearest(Hash key, int maxNumRouters, Set<Hash> peersToIgnore, KBucketSet kbuckets) { // LINT -- Exporting non-public type through public API
// sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia // sure, this may not be exactly correct per kademlia (peers on the border of a kbucket in strict kademlia
// would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm. // would behave differently) but I can see no reason to keep around an /additional/ more complicated algorithm.
// later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance() // later if/when selectNearestExplicit gets costly, we may revisit this (since kbuckets let us cache the distance()

View File

@ -56,7 +56,7 @@ class StoreJob extends JobImpl {
private final static int STORE_PRIORITY = 100; private final static int STORE_PRIORITY = 100;
/** /**
* Create a new search for the routingKey specified * Send a data structure to the floodfills
* *
*/ */
public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key, public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key,
@ -69,7 +69,7 @@ class StoreJob extends JobImpl {
* already know they have it). This can be null. * already know they have it). This can be null.
*/ */
public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key, public StoreJob(RouterContext context, KademliaNetworkDatabaseFacade facade, Hash key,
DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set toSkip) { DataStructure data, Job onSuccess, Job onFailure, long timeoutMs, Set<Hash> toSkip) {
super(context); super(context);
_log = context.logManager().getLog(StoreJob.class); _log = context.logManager().getLog(StoreJob.class);
getContext().statManager().createRateStat("netDb.storeRouterInfoSent", "How many routerInfo store messages have we sent?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l }); getContext().statManager().createRateStat("netDb.storeRouterInfoSent", "How many routerInfo store messages have we sent?", "NetworkDatabase", new long[] { 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
@ -146,7 +146,7 @@ class StoreJob extends JobImpl {
// This will help minimize active connections for floodfill peers and allow // This will help minimize active connections for floodfill peers and allow
// the network to scale. // the network to scale.
// Perhaps the ultimate solution is to send RouterInfos through a lease also. // Perhaps the ultimate solution is to send RouterInfos through a lease also.
List closestHashes; List<Hash> closestHashes;
if (_state.getData() instanceof RouterInfo) if (_state.getData() instanceof RouterInfo)
closestHashes = getMostReliableRouters(_state.getTarget(), toCheck, _state.getAttempted()); closestHashes = getMostReliableRouters(_state.getTarget(), toCheck, _state.getAttempted());
else else
@ -165,8 +165,8 @@ class StoreJob extends JobImpl {
//_state.addPending(closestHashes); //_state.addPending(closestHashes);
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
_log.info(getJobId() + ": Continue sending key " + _state.getTarget() + " after " + _state.getAttempted().size() + " tries to " + closestHashes); _log.info(getJobId() + ": Continue sending key " + _state.getTarget() + " after " + _state.getAttempted().size() + " tries to " + closestHashes);
for (Iterator iter = closestHashes.iterator(); iter.hasNext(); ) { for (Iterator<Hash> iter = closestHashes.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next(); Hash peer = iter.next();
DataStructure ds = _facade.getDataStore().get(peer); DataStructure ds = _facade.getDataStore().get(peer);
if ( (ds == null) || !(ds instanceof RouterInfo) ) { if ( (ds == null) || !(ds instanceof RouterInfo) ) {
if (_log.shouldLog(Log.INFO)) if (_log.shouldLog(Log.INFO))
@ -215,7 +215,7 @@ class StoreJob extends JobImpl {
* *
* @return ordered list of Hash objects * @return ordered list of Hash objects
*/ */
private List getClosestRouters(Hash key, int numClosest, Set alreadyChecked) { private List<Hash> getClosestRouters(Hash key, int numClosest, Set<Hash> alreadyChecked) {
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key); Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
//if (_log.shouldLog(Log.DEBUG)) //if (_log.shouldLog(Log.DEBUG))
// _log.debug(getJobId() + ": Current routing key for " + key + ": " + rkey); // _log.debug(getJobId() + ": Current routing key for " + key + ": " + rkey);
@ -225,7 +225,7 @@ class StoreJob extends JobImpl {
return _peerSelector.selectNearestExplicit(rkey, numClosest, alreadyChecked, ks); return _peerSelector.selectNearestExplicit(rkey, numClosest, alreadyChecked, ks);
} }
private List getMostReliableRouters(Hash key, int numClosest, Set alreadyChecked) { private List<Hash> getMostReliableRouters(Hash key, int numClosest, Set<Hash> alreadyChecked) {
Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key); Hash rkey = getContext().routingKeyGenerator().getRoutingKey(key);
KBucketSet ks = _facade.getKBuckets(); KBucketSet ks = _facade.getKBuckets();
if (ks == null) return new ArrayList(); if (ks == null) return new ArrayList();

View File

@ -15,12 +15,12 @@ class StoreState {
private RouterContext _context; private RouterContext _context;
private Hash _key; private Hash _key;
private DataStructure _data; private DataStructure _data;
private final HashSet _pendingPeers; private final HashSet<Hash> _pendingPeers;
private HashMap _pendingPeerTimes; private HashMap<Hash, Long> _pendingPeerTimes;
private final HashSet _successfulPeers; private final HashSet<Hash> _successfulPeers;
private final HashSet _successfulExploratoryPeers; private final HashSet<Hash> _successfulExploratoryPeers;
private final HashSet _failedPeers; private final HashSet<Hash> _failedPeers;
private final HashSet _attemptedPeers; private final HashSet<Hash> _attemptedPeers;
private int _completeCount; private int _completeCount;
private volatile long _completed; private volatile long _completed;
private volatile long _started; private volatile long _started;
@ -28,7 +28,7 @@ class StoreState {
public StoreState(RouterContext ctx, Hash key, DataStructure data) { public StoreState(RouterContext ctx, Hash key, DataStructure data) {
this(ctx, key, data, null); this(ctx, key, data, null);
} }
public StoreState(RouterContext ctx, Hash key, DataStructure data, Set toSkip) { public StoreState(RouterContext ctx, Hash key, DataStructure data, Set<Hash> toSkip) {
_context = ctx; _context = ctx;
_key = key; _key = key;
_data = data; _data = data;
@ -48,29 +48,29 @@ class StoreState {
public Hash getTarget() { return _key; } public Hash getTarget() { return _key; }
public DataStructure getData() { return _data; } public DataStructure getData() { return _data; }
public Set getPending() { public Set<Hash> getPending() {
synchronized (_pendingPeers) { synchronized (_pendingPeers) {
return (Set)_pendingPeers.clone(); return (Set<Hash>)_pendingPeers.clone();
} }
} }
public Set getAttempted() { public Set<Hash> getAttempted() {
synchronized (_attemptedPeers) { synchronized (_attemptedPeers) {
return (Set)_attemptedPeers.clone(); return (Set<Hash>)_attemptedPeers.clone();
} }
} }
public Set getSuccessful() { public Set<Hash> getSuccessful() {
synchronized (_successfulPeers) { synchronized (_successfulPeers) {
return (Set)_successfulPeers.clone(); return (Set<Hash>)_successfulPeers.clone();
} }
} }
public Set getSuccessfulExploratory() { public Set<Hash> getSuccessfulExploratory() {
synchronized (_successfulExploratoryPeers) { synchronized (_successfulExploratoryPeers) {
return (Set)_successfulExploratoryPeers.clone(); return (Set<Hash>)_successfulExploratoryPeers.clone();
} }
} }
public Set getFailed() { public Set<Hash> getFailed() {
synchronized (_failedPeers) { synchronized (_failedPeers) {
return (Set)_failedPeers.clone(); return (Set<Hash>)_failedPeers.clone();
} }
} }
public boolean completed() { return _completed != -1; } public boolean completed() { return _completed != -1; }
@ -92,10 +92,10 @@ class StoreState {
_attemptedPeers.add(peer); _attemptedPeers.add(peer);
} }
} }
public void addPending(Collection pending) { public void addPending(Collection<Hash> pending) {
synchronized (_pendingPeers) { synchronized (_pendingPeers) {
_pendingPeers.addAll(pending); _pendingPeers.addAll(pending);
for (Iterator iter = pending.iterator(); iter.hasNext(); ) for (Iterator<Hash> iter = pending.iterator(); iter.hasNext(); )
_pendingPeerTimes.put(iter.next(), new Long(_context.clock().now())); _pendingPeerTimes.put(iter.next(), new Long(_context.clock().now()));
} }
synchronized (_attemptedPeers) { synchronized (_attemptedPeers) {
@ -113,7 +113,7 @@ class StoreState {
long rv = -1; long rv = -1;
synchronized (_pendingPeers) { synchronized (_pendingPeers) {
_pendingPeers.remove(peer); _pendingPeers.remove(peer);
Long when = (Long)_pendingPeerTimes.remove(peer); Long when = _pendingPeerTimes.remove(peer);
if (when != null) if (when != null)
rv = _context.clock().now() - when.longValue(); rv = _context.clock().now() - when.longValue();
} }
@ -128,7 +128,7 @@ class StoreState {
long rv = -1; long rv = -1;
synchronized (_pendingPeers) { synchronized (_pendingPeers) {
_pendingPeers.remove(peer); _pendingPeers.remove(peer);
Long when = (Long)_pendingPeerTimes.remove(peer); Long when = _pendingPeerTimes.remove(peer);
if (when != null) if (when != null)
rv = _context.clock().now() - when.longValue(); rv = _context.clock().now() - when.longValue();
} }
@ -159,43 +159,43 @@ class StoreState {
buf.append(" Attempted: "); buf.append(" Attempted: ");
synchronized (_attemptedPeers) { synchronized (_attemptedPeers) {
buf.append(_attemptedPeers.size()).append(' '); buf.append(_attemptedPeers.size()).append(' ');
for (Iterator iter = _attemptedPeers.iterator(); iter.hasNext(); ) { for (Iterator<Hash> iter = _attemptedPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next(); Hash peer = iter.next();
buf.append(peer.toBase64()).append(" "); buf.append(peer.toBase64()).append(" ");
} }
} }
buf.append(" Pending: "); buf.append(" Pending: ");
synchronized (_pendingPeers) { synchronized (_pendingPeers) {
buf.append(_pendingPeers.size()).append(' '); buf.append(_pendingPeers.size()).append(' ');
for (Iterator iter = _pendingPeers.iterator(); iter.hasNext(); ) { for (Iterator<Hash> iter = _pendingPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next(); Hash peer = iter.next();
buf.append(peer.toBase64()).append(" "); buf.append(peer.toBase64()).append(" ");
} }
} }
buf.append(" Failed: "); buf.append(" Failed: ");
synchronized (_failedPeers) { synchronized (_failedPeers) {
buf.append(_failedPeers.size()).append(' '); buf.append(_failedPeers.size()).append(' ');
for (Iterator iter = _failedPeers.iterator(); iter.hasNext(); ) { for (Iterator<Hash> iter = _failedPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next(); Hash peer = iter.next();
buf.append(peer.toBase64()).append(" "); buf.append(peer.toBase64()).append(" ");
} }
} }
buf.append(" Successful: "); buf.append(" Successful: ");
synchronized (_successfulPeers) { synchronized (_successfulPeers) {
buf.append(_successfulPeers.size()).append(' '); buf.append(_successfulPeers.size()).append(' ');
for (Iterator iter = _successfulPeers.iterator(); iter.hasNext(); ) { for (Iterator<Hash> iter = _successfulPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next(); Hash peer = iter.next();
buf.append(peer.toBase64()).append(" "); buf.append(peer.toBase64()).append(" ");
} }
} }
buf.append(" Successful Exploratory: "); buf.append(" Successful Exploratory: ");
synchronized (_successfulExploratoryPeers) { synchronized (_successfulExploratoryPeers) {
buf.append(_successfulExploratoryPeers.size()).append(' '); buf.append(_successfulExploratoryPeers.size()).append(' ');
for (Iterator iter = _successfulExploratoryPeers.iterator(); iter.hasNext(); ) { for (Iterator<Hash> iter = _successfulExploratoryPeers.iterator(); iter.hasNext(); ) {
Hash peer = (Hash)iter.next(); Hash peer = iter.next();
buf.append(peer.toBase64()).append(" "); buf.append(peer.toBase64()).append(" ");
} }
} }
return buf.toString(); return buf.toString();
} }
} }

View File

@ -250,8 +250,4 @@ class PeerManager {
return rv; return rv;
} }
} }
public void renderStatusHTML(Writer out) throws IOException {
_organizer.renderStatusHTML(out);
}
} }

View File

@ -78,8 +78,8 @@ public class PeerManagerFacadeImpl implements PeerManagerFacade {
return _manager.getPeersByCapability(capability); return _manager.getPeersByCapability(capability);
} }
/** @deprecated, moved to routerconsole */
public void renderStatusHTML(Writer out) throws IOException { public void renderStatusHTML(Writer out) throws IOException {
_manager.renderStatusHTML(out);
} }
} }

View File

@ -139,7 +139,7 @@ public class ProfileOrganizer {
} }
public void setUs(Hash us) { _us = us; } public void setUs(Hash us) { _us = us; }
Hash getUs() { return _us; } public Hash getUs() { return _us; }
public double getSpeedThreshold() { return _thresholdSpeedValue; } public double getSpeedThreshold() { return _thresholdSpeedValue; }
public double getCapacityThreshold() { return _thresholdCapacityValue; } public double getCapacityThreshold() { return _thresholdCapacityValue; }
@ -258,11 +258,6 @@ public class ProfileOrganizer {
_persistenceHelper.writeProfile(prof, out); _persistenceHelper.writeProfile(prof, out);
} }
public void renderStatusHTML(Writer out) throws IOException {
ProfileOrganizerRenderer rend = new ProfileOrganizerRenderer(this, _context);
rend.renderStatusHTML(out);
}
/** /**
* Return a set of Hashes for peers that are both fast and reliable. If an insufficient * Return a set of Hashes for peers that are both fast and reliable. If an insufficient
* number of peers are both fast and reliable, fall back onto high capacity peers, and if that * number of peers are both fast and reliable, fall back onto high capacity peers, and if that