propagate from branch 'i2p.i2p.zzz.multisess' (head 655a0c2bbd50625c804b8de8c809b40ed63f53f4)

to branch 'i2p.i2p' (head b977ab50209475c0e74825f361924e05dbd470c7)
This commit is contained in:
zzz
2015-06-17 16:00:53 +00:00
59 changed files with 572 additions and 224 deletions

View File

@ -1738,7 +1738,7 @@ public class SnarkManager implements CompleteListener {
int totalDeleted = 0;
synchronized (_snarks) {
for (Snark snark : _snarks.values()) {
torrents.add(new SHA1Hash(snark.getMetaInfo().getInfoHash()));
torrents.add(new SHA1Hash(snark.getInfoHash()));
}
synchronized (_configLock) {
for (int i = 0; i < B64.length(); i++) {

View File

@ -47,8 +47,8 @@ public class IrcInboundFilter implements Runnable {
in = new BufferedReader(new InputStreamReader(remote.getInputStream(), "ISO-8859-1"));
output=local.getOutputStream();
} catch (IOException e) {
if (_log.shouldLog(Log.ERROR))
_log.error("IrcInboundFilter: no streams",e);
if (_log.shouldLog(Log.WARN))
_log.warn("IrcInboundFilter: no streams",e);
return;
}
if (_log.shouldLog(Log.DEBUG))

View File

@ -47,8 +47,8 @@ public class IrcOutboundFilter implements Runnable {
in = new BufferedReader(new InputStreamReader(local.getInputStream(), "ISO-8859-1"));
output=remote.getOutputStream();
} catch (IOException e) {
if (_log.shouldLog(Log.ERROR))
_log.error("IrcOutboundFilter: no streams",e);
if (_log.shouldLog(Log.WARN))
_log.warn("IrcOutboundFilter: no streams",e);
return;
}
if (_log.shouldLog(Log.DEBUG))

View File

@ -276,6 +276,26 @@ public class ConsoleUpdateManager implements UpdateManager, RouterApp {
return _status;
}
/**
* Is an update available?
* Blocking.
* An available update may still have a constraint or lack sources.
* @param type the UpdateType of this request
* @return new version or null if nothing newer is available
* @since 0.9.21
*/
public String checkAvailable(UpdateType type) {
return checkAvailable(type, "", DEFAULT_CHECK_TIME);
}
/**
* Is an update available?
* Blocking.
* An available update may still have a constraint or lack sources.
* @param type the UpdateType of this request
* @param maxWait max time to block
* @return new version or null if nothing newer is available
*/
public String checkAvailable(UpdateType type, long maxWait) {
return checkAvailable(type, "", maxWait);
}
@ -284,6 +304,8 @@ public class ConsoleUpdateManager implements UpdateManager, RouterApp {
* Is an update available?
* Blocking.
* An available update may still have a constraint or lack sources.
* @param type the UpdateType of this request
* @param id id of this request
* @param maxWait max time to block
* @return new version or null if nothing newer is available
*/

View File

@ -122,6 +122,112 @@ public interface UpdateManager {
*/
public boolean notifyComplete(UpdateTask task, String actualVersion, File file);
/**
* Is an update available?
* Blocking.
* An available update may still have a constraint or lack sources.
* @param type the UpdateType of this request
* @return new version or null if nothing newer is available
* @since 0.9.21
*/
public String checkAvailable(UpdateType type);
/**
* Is an update available?
* Blocking.
* An available update may still have a constraint or lack sources.
* @param type the UpdateType of this request
* @param maxWait max time to block
* @return new version or null if nothing newer is available
* @since 0.9.21
*/
public String checkAvailable(UpdateType type, long maxWait);
/**
* Is an update available?
* Blocking.
* An available update may still have a constraint or lack sources.
* @param type the UpdateType of this request
* @param maxWait max time to block
* @param id id of this request
* @return new version or null if nothing newer is available
* @since 0.9.21
*/
public String checkAvailable(UpdateType type, String id, long maxWait);
/**
* Is a router update being downloaded?
* @return true iff router update is being downloaded
* @since 0.9.21
*/
public boolean isUpdateInProgress();
/**
* Is a router update being downloaded?
* @param type the UpdateType of this request
* @return true iff router update is being downloaded
* @since 0.9.21
*/
public boolean isUpdateInProgress(UpdateType type);
/**
* Is a router update being downloaded?
* @param type the UpdateType of this request
* @param id of this request
* @return true iff router update is being downloaded
* @since 0.9.21
*/
public boolean isUpdateInProgress(UpdateType type, String id);
/**
* Non-blocking. Does not check.
* Fails if check or update already in progress.
* If returns true, then call isUpdateInProgress() in a loop
* @param type the UpdateType of this request
* @return true if task started
* @since 0.9.21
*/
public boolean update(UpdateType type);
/**
* Non-blocking. Does not check.
* Fails if check or update already in progress.
* If returns true, then call isUpdateInProgress() in a loop
* @param type the UpdateType of this request
* @param id id of this request
* @return true if task started
* @since 0.9.21
*/
public boolean update(UpdateType type, String id);
/**
* Non-blocking. Does not check.
* Fails if check or update already in progress.
* If returns true, then call isUpdateInProgress() in a loop
* @param type the UpdateType of this request
* @param maxTime not honored by all Updaters
* @return true if task started
* @since 0.9.21
*/
public boolean update(UpdateType type, long maxTime);
/**
* Non-blocking. Does not check.
* Fails if check or update already in progress.
* If returns true, then call isUpdateInProgress() in a loop
* @param type the UpdateType of this request
* @param maxTime not honored by all Updaters
* @param id id of this request
* @return true if task started
* @since 0.9.21
*/
public boolean update(UpdateType type, String id, long maxTime);
/**
* The status on any update current or last finished.
* @return status or ""
*/
public String getStatus();
/**
* For debugging
*/

View File

@ -110,7 +110,7 @@ public class ResettableGZIPInputStream extends InflaterInputStream {
//if (_lookaheadStream.getEOFReached()) {
if (inf.finished()) {
verifyFooter();
inf.reset(); // so it doesn't bitch about missing data...
inf.reset(); // so it doesn't complain about missing data...
_complete = true;
}
return read;

5
debian/apparmor/i2p vendored
View File

@ -51,11 +51,16 @@
# 'm' is needed by the I2P-Bote plugin
/{,lib/live/mount/overlay/}tmp/ rwm,
owner /{,lib/live/mount/overlay/}tmp/hsperfdata_i2psvc/ rwk,
owner /{,lib/live/mount/overlay/}tmp/hsperfdata_i2psvc/** rw,
owner /{,lib/live/mount/overlay/}tmp/wrapper[0-9]*.tmp rwk,
owner /{,lib/live/mount/overlay/}tmp/wrapper[0-9]*.tmp/** rw,
owner /{,lib/live/mount/overlay/}tmp/i2p-daemon/ rwm,
owner /{,lib/live/mount/overlay/}tmp/i2p-daemon/** rwklm,
# Prevent spamming the logs
deny /dev/tty rw,
deny /{,lib/live/mount/overlay/}var/tmp/ r,
deny @{PROC}/[0-9]*/fd/ r,
deny /usr/sbin/ r,
deny /var/cache/fontconfig/ wk,

2
debian/control vendored
View File

@ -12,7 +12,7 @@ Build-Depends: debhelper (>= 7.0.50~)
,default-jdk | openjdk-7-jdk | openjdk-6-jdk
,dh-apparmor
,gettext
,libgmp3-dev
,libgmp-dev (>= 2:5.0.5)
# uncomment the next line for official builds
# ,libservice-wrapper-java
,libcommons-logging-java

11
debian/i2p.init vendored
View File

@ -32,7 +32,6 @@ WRAPPERLOG="/var/log/i2p/wrapper.log"
RUN_DAEMON="False"
NICE=0
I2PUSER="i2psvc"
USE_AA="yes"
I2P_ARGS="/etc/i2p/wrapper.config \
wrapper.java.additional.1=-DloggerFilenameOverride=/var/log/i2p/log-router-@.txt \
@ -67,6 +66,15 @@ if [ -z "$RUN_DAEMON" ]; then
exit 1
fi
case "$CONFINE_WITH_APPARMOR" in
[NnFf]*)
USE_AA="no"
;;
*)
USE_AA="yes"
;;
esac
case "$RUN_DAEMON" in
[NnFf]*)
log_action_msg "$DESC daemon disabled in /etc/default/$NAME".
@ -83,6 +91,7 @@ esac
do_start()
{
[ ! -z $ULIMIT ] && ulimit -n $ULIMIT
start-stop-daemon --start --quiet --pidfile $PIDFILE --exec $DAEMON --test > /dev/null 2>&1 \
|| return 1
[ -d $RUN ] || mkdir $RUN > /dev/null 2>&1

33
debian/i2p.postinst vendored
View File

@ -6,7 +6,7 @@ I2PHOME=/var/lib/i2p
I2PSYSUSER=i2psvc
conffile="/etc/default/i2p"
#systemdservice="/lib/systemd/system/i2p.service"
systemdservice="/lib/systemd/system/i2p.service"
# Source debconf library -- we have a Depends line
# to make sure it is there...
@ -25,6 +25,7 @@ case "$1" in
echo >> $conffile
echo "RUN_DAEMON=" >> $conffile
echo "I2PUSER=" >> $conffile
echo "ULIMIT=" >> $conffile
echo "CONFINE_WITH_APPARMOR=" >> $conffile
echo "# The next value is also wrapper.java.maxmemory in /etc/i2p/wrapper.config" >> $conffile
echo "MEMORYLIMIT=" >> $conffile
@ -49,6 +50,8 @@ case "$1" in
echo "I2PUSER=" >> $conffile
test -z "$MEMORYLIMIT" || grep -Eq '^ *MEMORYLIMIT=' $conffile || \
echo "MEMORYLIMIT=" >> $conffile
test -z "$ULIMIT" || grep -Eq '^ *ULIMIT=' $conffile || \
echo "ULIMIT=" >> $conffile
test -z "$CONFINE_WITH_APPARMOR" || grep -Eq '^ *CONFINE_WITH_APPARMOR=' $conffile || \
echo "CONFINE_WITH_APPARMOR=" >> $conffile
@ -57,8 +60,6 @@ case "$1" in
I2PUSER="i2psvc"
fi
sed -e "s/^ *RUN_DAEMON=.*/RUN_DAEMON=\"$RUN_DAEMON\"/" \
-e "s/^ *I2PUSER=.*/I2PUSER=\"$I2PUSER\"/" \
-e "s/^ *MEMORYLIMIT=.*/MEMORYLIMIT=\"$MEMORYLIMIT\"/" \
@ -66,19 +67,19 @@ case "$1" in
< $conffile > $conffile.tmp
mv -f $conffile.tmp $conffile
# if [ -e "$systemdservice" ]; then
# sed -e "s/User=.*/User=$I2PUSER/" < "$systemdservice" > "$systemdservice.tmp"
# mv -f "$systemdservice.tmp" "$systemdservice"
# chmod 0644 -f "$systemdservice"
# if grep -q 'systemd' /proc/1/comm > /dev/null 2>&1; then
# systemctl --system daemon-reload
# if [ $RUN_DAEMON = 'true' ]; then
# systemctl enable i2p.service
# else
# systemctl disable i2p.service
# fi
# fi
# fi
if [ -e "$systemdservice" ]; then
sed -e "s/User=.*/User=$I2PUSER/" < "$systemdservice" > "$systemdservice.tmp"
mv -f "$systemdservice.tmp" "$systemdservice"
chmod 0644 -f "$systemdservice"
if grep -q 'systemd' /proc/1/comm > /dev/null 2>&1; then
systemctl --system daemon-reload
if [ $RUN_DAEMON = 'true' ]; then
systemctl enable i2p.service
else
systemctl disable i2p.service
fi
fi
fi
sed -e "s/^ *wrapper\.java\.maxmemory=.*/wrapper\.java\.maxmemory=$MEMORYLIMIT/" \
< /etc/i2p/wrapper.config > /etc/i2p/wrapper.config.tmp

28
debian/i2p.service vendored
View File

@ -1,10 +1,31 @@
# It's not recommended to modify this file because it will be
# overwritten during package upgrades. If you want to make changes, the
# best way is to create a file "/etc/systemd/system/i2p.service.d/foo.conf"
# and make your changes there. This file will be parsed after the file
# i2p.service itself is parsed.
#
# For more info about custom unit files, see systemd.unit(5) or
# http://fedoraproject.org/wiki/Systemd#How_do_I_customize_a_unit_file.2F_add_a_custom_unit_file.3F
# For example, if you want to increase I2P's open-files-limit to 10000,
# you need to increase systemd's LimitNOFILE setting, so create a file named
# "/etc/systemd/system/i2p.service.d/limits.conf" containing:
# [Service]
# LimitNOFILE=10000
# Don't forget to reload systemd daemon after you change unit configuration:
# root> systemctl --system daemon-reload
[Unit]
Description=load-balanced unspoofable packet switching network
After=network.target
After=local-fs.target network.target time-sync.target
[Service]
Type=forking
EnvironmentFile=/etc/default/i2p
RuntimeDirectory=i2p
RuntimeDirectoryMode=750
PIDFile=/run/i2p/i2p.pid
Environment="I2P_ARGS=/etc/i2p/wrapper.config \
wrapper.java.additional.1=-DloggerFilenameOverride=/var/log/i2p/log-router-@.txt \
wrapper.java.additional.10=-Dwrapper.logfile=/var/log/i2p/wrapper.log \
@ -16,11 +37,12 @@ Environment="I2P_ARGS=/etc/i2p/wrapper.config \
wrapper.daemonize=TRUE" TZ=UTC
User=i2psvc
PermissionsStartOnly=true
ExecStartPre=/bin/mkdir -p /run/i2p /tmp/i2p-daemon
AppArmorProfile=system_i2p
ExecStartPre=/bin/mkdir -p /tmp/i2p-daemon
ExecStartPre=/bin/chown -R ${I2PUSER}:${I2PUSER} /var/log/i2p /run/i2p /tmp/i2p-daemon
ExecStartPre=/bin/chmod 750 /var/log/i2p
ExecStart=/usr/sbin/wrapper "$I2P_ARGS"
ExecStopPost=/bin/rm -rf /run/i2p /tmp/i2p-daemon
ExecStopPost=/bin/rm -rf /run/i2p
[Install]
WantedBy=multi-user.target

View File

@ -1,3 +1,29 @@
2015-06-13 zzz
* i2psnark: Fix NPE (ticket #1602)
* NetDB:
- Improve routing of DatabaseStoreMessage acks
- Send our own RI unsolicited in reply if we aren't floodfill
- Don't ack or flood a store of an unknown type
* PeerTestJob: Don't generate zero reply token
* Tunnels: More checks of messages received down exploratory tunnels
2015-06-08 dg
* Language fixes
* Make netDb.storeFloodNew graphable for testing (#1195)
* Directly connect to nearby floodfills to share our RI
to speed up integration of new floodfills (#1195)
* Silence Irc{Inbound,Outbound}Filter warnings about 'no streams'
when we can't connect to an IRC server. Change to WARN.
2015-06-07 zzz
* Logs: Correct wrapper.config location when running as a service
* NetDB: Fix early NPE
* SSU: Possible fix for NPE in establisher
2015-06-06 zzz
* Console: Add indication of current ff status on /configadvanced,
change immediately when config changes, force republish
2015-06-06 str4d
* newsxml: Don't use XXX for parsing dates on Android
@ -4347,7 +4373,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
- Use new synchronized change-and-save-config methods
to eliminate races with ReadConfigJob
* Tunnels:
- When a peer is shitlisted, fail all our tunnels where
- When a peer is banlisted, fail all our tunnels where
that peer is the adjacent hop. In particular this
will remove outbound tunnels when we can't contact
the first hop, and enable quicker recovery.
@ -4866,7 +4892,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
2011-09-30 zzz
* logs.jsp: Add wrapper version
* Shitlist: Shorten time
* Banlist: Shorten time
* Wrapper: Update armv7 to 3.5.12
2011-09-30 kytv
@ -4957,7 +4983,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
tunnels from being expired and causing high CPU usage
2011-09-08 zzz
* Blocklist: Include IP in shitlist reason
* Blocklist: Include IP in banlist reason
* Ministreaming: Drop old classes replaced by streaming
years ago.
* NTCP: Hopefully fix race NPE, thx devzero
@ -7129,7 +7155,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
* Transport clock skews:
- Store and report UDP clock skews even for large values, so
a badly skewed local clock will be reported to the console
- Don't shitlist for NTCP clock skew if we don't know what time it is
- Don't banlist for NTCP clock skew if we don't know what time it is
- If NTP hasn't worked yet, have NTCP or SSU update the clock one time
- Include failed clock skew in NTCP skew vector if there aren't many connections
- Don't include NTCP clock skews for non-established connections
@ -7491,7 +7517,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
* addressbook: Move class to net.i2p.addressbook
* build: Take two test scripts out of the installer
* i2psnark: Bye TPB
* Shitlist: Fix bug from two checkins ago, all were forever
* Banlist: Fix bug from two checkins ago, all were forever
2009-11-14 zzz
* HTTP Proxy:
@ -7506,7 +7532,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
2009-11-11 zzz
* Console: Some colon cleansing
* FloodfillPeerSelector: Adjustments
* Shitlist: Move HTML renderer to router console,
* Banlist: Move HTML renderer to router console,
add cause parameter for ease of translation,
tag all causes
@ -7854,13 +7880,13 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
* logs.jsp: Add system encoding
* Ministreaming: Cleanups, deprecation, move demos out of the lib
* netdb.jsp: Flags for leases
* NTCP: Clean up clock skew shitlist message
* NTCP: Clean up clock skew banlist message
* profiles.jsp:
- Rename the Failing column
- Reduce the time cutoff again to 90m (was 2h)
* readme*html: localhost -> 127.0.0.1
* Router: Don't do some things when we are shutting down
* Shitlist: Clean up expire message
* Banlist: Clean up expire message
* Stats:
- Fix BufferedStatsLog so it works at all
- Don't instantiate BufferedStatsLog unless stats.logFilters
@ -7926,7 +7952,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
including our own
2) It randomly dies after a while
* Console:
- Rename the shitlist and the blocklist
- Rename the banlist and the blocklist
- Try to reduce servlet problems on iframe
- Select server or client icon for local dests
* EepHead: New
@ -8661,7 +8687,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
spiff up tunnels.jsp and profiles.jsp.
Existing installs can get files with 'ant updaterWIthGeoIP'
or in the console docs bundle 'ant consoleDocs'
- Use flags for shitlist and peers.jsp too
- Use flags for banlist and peers.jsp too
- Tweak tunnels.jsp to show class letters
- Hide in-progress details on tunnels.jsp
- Add a little color to confignav
@ -9142,7 +9168,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
- Plug in-progress build leak
2009-02-07 zzz
* ClientConnectionRunner, Shitlist, TunnelDispatcher:
* ClientConnectionRunner, Banlist, TunnelDispatcher:
Update using concurrent
* Streaming ConnectionHandler: Bound SYN queue and
use concurrent to prevent blowup
@ -9290,13 +9316,13 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
- Floodfills periodically shuffle their KBuckets, and
FloodfillPeerSelector sorts more keys, so that
exploration works well
* Shitlist: Reduce max time to 30m (was 60m)
* Banlist: Reduce max time to 30m (was 60m)
* Streaming:
- Reduce default initial window size from 12 to 6,
to account for the MTU increase in the last release
and try to limit initial packet loss
- Reduce fast retransmit threshold from 3 to 2
* Transport: Don't shitlist a peer if we are at our
* Transport: Don't banlist a peer if we are at our
connection limit
2009-01-03 zzz
@ -9420,7 +9446,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
2008-11-21 zzz
* Cache DNS and negative DNS for 5m (was 1m and forever)
* Delay shitlist cleaner at startup
* Delay banlist cleaner at startup
* Strip wrapper properties from client config
* Define multiple cert type
* Prohibit negative maxSends in streaming
@ -9628,7 +9654,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
* configpeer.jsp: Add blocklist info
* help.jsp: Add link to German FAQ
* tunnels.jsp: Fix inactive participating count
* SearchReplyJob: Don't look up references to shitlisted peers
* SearchReplyJob: Don't look up references to banlisted peers
* TunnelPeerSelector: Avoid a peer for 20s after a reject or timeout
2008-09-20 zzz
@ -9691,7 +9717,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
2008-09-06 zzz
* EepGet command line: Fix byte counts after a failed resume
* NTCP: Mark unreachable on outbound connection timeout
* Shitlist: Fix partial shitlisting (still unused though)
* Banlist: Fix partial banlisting (still unused though)
* Summary Bar: Warn if firewalled and floodfill
* Throttle: Combine current and last bw measurement,
reduce default max tunnels to 2500 (was 3000)
@ -9735,16 +9761,16 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
2008-08-04 zzz
* Floodfill Peer Selector:
- Avoid peers whose netdb is old, or have a recent failed store,
or are forever-shitlisted
or are forever-banlisted
2008-07-30 zzz
* Blocklists:
- New, disabled by default, except for blocking of
forever-shitlisted peers. See source for instructions
forever-banlisted peers. See source for instructions
and file format.
* Transport - Reject peers from inbound connections:
- Check IP against blocklist
- Check router hash against forever-shitlist, then block IP
- Check router hash against forever-banlist, then block IP
2008-07-16 zzz
* configpeer.jsp: New
@ -9776,7 +9802,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
- Change some logging from WARN to INFO
- Clean up toString()
* SSU:
- Try to pick better introducers by checking shitlist,
- Try to pick better introducers by checking banlist,
wasUnreachable list, failing list, and idle times
- To keep introducer connections up and valid,
periodically send a "ping" (a data packet with no data and no acks)
@ -9863,8 +9889,8 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
2008-06-07 zzz
* NetDb: Tweak some logging on lease problems
* Shitlist:
- Add shitlistForever() and isShitlistedForever(), unused for now
* Banlist:
- Add banlistForever() and isBanlistedForever(), unused for now
- Sort the HTML output by router hash
* netdb.jsp:
- Sort the lease HTML output by dest hash, local first
@ -9906,7 +9932,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
(for SAM for example). Defaults to true of course.
* Logging: Move common WARN output to DEBUG so we can ask users to
set the default log level to WARN without massive spewage
* ProfileOrganizer: Restrict !isSelectable() (i.e. shitlisted) peers from the High Capacity tier,
* ProfileOrganizer: Restrict !isSelectable() (i.e. banlisted) peers from the High Capacity tier,
not just the Fast tier, since we don't use them for tunnels anyway
* SAM: Add some compiler flexibility to two obscure makefiles
* i2psnark: Change displayed peer idents to match that shown by bytemonsoon
@ -10069,7 +10095,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
2008-04-17 zzz
* Reachability:
- Track unreachable peers persistently
(i.e. separately from shitlist, and not cleared when they contact us)
(i.e. separately from banlist, and not cleared when they contact us)
- Exclude detected unreachable peers from inbound tunnels
- Exclude detected unreachable peers from selected leases
- Exclude detected unreachable floodfill peers from lookups
@ -10077,15 +10103,15 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
2008-04-16 zzz
* SSU/Reachability:
- Extend shitlist time from 4-8m to 40-60m
- Add some shitlist logging
- Don't shitlist twice when unreachable on all transports
- Extend banlist time from 4-8m to 40-60m
- Add some banlist logging
- Don't banlist twice when unreachable on all transports
- Exclude netDb-listed unreachable peers from inbound tunnels;
this won't help much since there are very few of these now
- Remove 10s delay on inbound UDP connections used for the
0.6.1.10 transition
- Track and display UDP connection direction on peers.jsp
- Show shitlist status in-line on profiles.jsp
- Show banlist status in-line on profiles.jsp
2008-04-15 zzz
* SSU Reachability/PeerTestManager:
@ -10183,7 +10209,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
2008-03-14 zzz
* Floodfill Search:
- Prefer heard-from, unfailing, unshitlisted floodfill peers
- Prefer heard-from, unfailing, unbanlisted floodfill peers
2008-03-14 zzz
* ProfileOrganizer:
@ -10968,8 +10994,8 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
new "identlog.txt" text file in the I2P install directory. For
debugging purposes, publish the count of how many identities the
router has cycled through, though not the identities itself.
* Cleaned up the way the multitransport shitlisting worked, and
added per-transport shitlists
* Cleaned up the way the multitransport banlisting worked, and
added per-transport banlists
* When dropping a router reference locally, first fire a netDb
lookup for the entry
* Take the peer selection filters into account when organizing the
@ -10999,7 +11025,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
even if those shouldn't exist)
2006-07-14 jrandom
* Improve the multitransport shitlisting (thanks Complication!)
* Improve the multitransport banlisting (thanks Complication!)
* Allow routers with a capacity of 16-32KBps to be used in tunnels under
the default configuration (thanks for the stats Complication!)
* Properly allow older router references to load on startup
@ -11021,7 +11047,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
GNU/Classpath based JVMs
* Adjust the Fortuna PRNG's pooling system to reduce contention on
refill with a background thread to refill the output buffer
* Add per-transport support for the shitlist
* Add per-transport support for the banlist
* Add a new async pumped tunnel gateway to reduce tunnel dispatcher
contention
@ -11091,7 +11117,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
* SSU modifications to cut down on unnecessary connection failures
2006-05-16 jrandom
* Further shitlist randomizations
* Further banlist randomizations
* Adjust the stats monitored for detecting cpu overload when dropping new
tunnel requests
@ -11304,7 +11330,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
picker.
* Cut down on subsequent streaming lib reset packets transmitted
* Use a larger MTU more often
* Allow netDb searches to query shitlisted peers, as the queries are
* Allow netDb searches to query banlisted peers, as the queries are
indirect.
* Add an option to disable non-floodfill netDb searches (non-floodfill
searches are used by default, but can be disabled by adding
@ -11366,7 +11392,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
searches outside the floodfill set)
* Fix to the SSU IP detection code so we won't use introducers when we
don't need them (thanks Complication!)
* Add a brief shitlist to i2psnark so it doesn't keep on trying to reach
* Add a brief banlist to i2psnark so it doesn't keep on trying to reach
peers given to it
* Don't let netDb searches wander across too many peers
* Don't use the 1s bandwidth usage in the tunnel participation throttle,
@ -11925,7 +11951,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
left to the (work in progress) web UI.
2005-12-14 jrandom
* Fix to drop peer references when we shitlist people again (thanks zzz!)
* Fix to drop peer references when we banlist people again (thanks zzz!)
* Further I2PSnark fixes to deal with arbitrary torrent info attributes
(thanks Complication!)
@ -12024,7 +12050,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
separate jdom or rome, as they're inside syndie.war.
2005-11-30 jrandom
* Don't let the TCP transport alone shitlist a peer, since other
* Don't let the TCP transport alone banlist a peer, since other
transports may be working. Also display whether TCP connections are
inbound or outbound on the peers page.
* Fixed some substantial bugs in the SSU introducers where we wouldn't
@ -12092,7 +12118,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
address (read: SSU IP or port). This only offers minimal additional
protection against trivial attackers, but should provide functional
improvement for people who have periodic IP changes, since their new
router address would not be shitlisted while their old one would be.
router address would not be banlisted while their old one would be.
* Added further infrastructure for restricted route operation, but its use
is not recommended.
@ -12240,10 +12266,10 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
* Improved the bandwidth throtting on tunnel participation, especially for
low bandwidth peers.
* Improved failure handling in SSU with proactive reestablishment of
failing idle peers, and rather than shitlisting a peer who failed too
failing idle peers, and rather than banlisting a peer who failed too
much, drop the SSU session and allow a new attempt (which, if it fails,
will cause a shitlisting)
* Clarify the cause of the shitlist on the profiles page, and include
will cause a banlisting)
* Clarify the cause of the banlist on the profiles page, and include
bandwidth limiter info at the bottom of the peers page.
2005-10-26 jrandom
@ -12256,7 +12282,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
2005-10-25 jrandom
* Defer netDb searches for newly referenced peers until we actually want
them
* Ignore netDb references to peers on our shitlist
* Ignore netDb references to peers on our banlist
* Set the timeout for end to end client messages to the max delay after
finding the leaseSet, so we don't have as many expired messages floating
around.
@ -12471,7 +12497,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
reseeding - useful on OSes that make it hard to create dot files.
Thanks Complication (and anon)!
* Fixed the installer version string (thanks Frontier!)
* Added cleaner rejection of invalid IP addresses, shitlist those who send
* Added cleaner rejection of invalid IP addresses, banlist those who send
us invalid IP addresses, verify again that we are not sending invalid IP
addresses, and log an error if it happens. (Thanks Complication, ptm,
and adab!)
@ -12499,7 +12525,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
instead of k/cwin)
* Limit the number of inbound SSU sessions being built at once (using
half of the i2np.udp.maxConcurrentEstablish config prop)
* Don't shitlist on a message send failure alone (unless there aren't any
* Don't banlist on a message send failure alone (unless there aren't any
common transports).
* More careful bandwidth bursting
@ -12636,7 +12662,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
2005-09-13 jrandom
* More careful error handling with introductions (thanks dust!)
* Fix the forceIntroducers checkbox on config.jsp (thanks Complication!)
* Hide the shitlist on the summary so it doesn't confuse new users.
* Hide the banlist on the summary so it doesn't confuse new users.
2005-09-12 comwiz
* Migrated the router tests to junit
@ -12743,8 +12769,8 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
2005-08-24 jrandom
* Catch errors with corrupt tunnel messages more gracefully (no need to
kill the thread and cause an OOM...)
* Don't skip shitlisted peers for netDb store messages, as they aren't
necessarily shitlisted by other people (though they probably are).
* Don't skip banlisted peers for netDb store messages, as they aren't
necessarily banlisted by other people (though they probably are).
* Adjust the netDb store per-peer timeout based on each particular peer's
profile (timeout = 4x their average netDb store response time)
* Don't republish leaseSets to *failed* peers - send them to peers who
@ -12787,14 +12813,14 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
netDb.
* Don't overwrite the status with 'unknown' unless we haven't had a valid
status in a while.
* Make sure to avoid shitlisted peers for peer testing.
* Make sure to avoid banlisted peers for peer testing.
* When we get an unknown result to a peer test, try again soon afterwards.
* When a peer tells us that our address is different from what we expect,
if we've done a recent peer test with a result of OK, fire off a peer
test to make sure our IP/port is still valid. If our test is old or the
result was not OK, accept their suggestion, but queue up a peer test for
later.
* Don't try to do a netDb store to a shitlisted peer, and adjust the way
* Don't try to do a netDb store to a banlisted peer, and adjust the way
we monitor netDb store progress (to clear up the high netDb.storePeers
stat)
@ -13426,7 +13452,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
* Fix a fencepost in the tunnel building so that e.g. a variance of
2 means +/- 2, not +/- 1 (thanks dm!)
* Avoid an NPE on client disconnect
* Never select a shitlisted peer to participate in a tunnel
* Never select a banlisted peer to participate in a tunnel
* Have netDb store messages timeout after 10s, not the full 60s (duh)
* Keep session tags around for a little longer, just in case (grr)
* Cleaned up some closing event issues on the streaming lib
@ -14276,7 +14302,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
(thanks Jonva!)
2004-10-04 jrandom
* Update the shitlist to reject a peer for an exponentially increasing
* Update the banlist to reject a peer for an exponentially increasing
period of time (with an upper bounds of an hour).
* Various minor stat and debugging fixes
@ -14290,7 +14316,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
such as "dbResponseTime" or "tunnelTestResponseTime".
2004-10-02 jrandom
* Assure that we quickly fail messages bound for shitlisted peers.
* Assure that we quickly fail messages bound for banlisted peers.
* Address a race on startup where the first peer contacted could hang the
router (thanks Romster!)
* Only whine about an intermittent inability to query the time server once

View File

@ -1,23 +1,23 @@
-----BEGIN CERTIFICATE-----
MIIDvjCCAyegAwIBAgICZhcwDQYJKoZIhvcNAQEFBQAwdTELMAkGA1UEBhMCVVMx
DTALBgNVBAgMBG5vbmUxDTALBgNVBAcMBG5vbmUxDTALBgNVBAoMBG5vbmUxDTAL
BgNVBAsMBG5vbmUxFTATBgNVBAMMDGkycC5tb29vLmNvbTETMBEGCSqGSIb3DQEJ
ARYEbm9uZTAeFw0xMTEwMjMyMTM2NDFaFw0xOTEwMjMyMTM2NDFaMGYxCzAJBgNV
BAYTAlVTMQ0wCwYDVQQIDARub25lMQ0wCwYDVQQKDARub25lMQ0wCwYDVQQLDARu
b25lMRUwEwYDVQQDDAxpMnAubW9vby5jb20xEzARBgkqhkiG9w0BCQEWBG5vbmUw
ggGPMA0GCSqGSIb3DQEBAQUAA4IBfAAwggF3AoIBbgMG1O7HRVa7UoiKbQTmKy5m
x79Na8vjD3etcOwfc4TSenQFvn+GbAWkJwKpM8uvOcgj1CxNeHWdSaeTFH1OwJsw
vl3leJ7clMdo3hpQDhPeGzBLyOiWwFHVn15YKa9xcM7S9Op5Q6rKBHUyyx1vGSz+
/NBmkktpI6rcGFfP3ISRL0auR+db+adWv4TS6W8YiwQIVZNbSlKP6FNO9Mv1kxQZ
KoHPn8vT/LtAh1fcI6ryBuy3F5oHfbGumIwsS5dpowryFxQzwg5vtMA7AMCMKyXv
hP/W6OuaaEP5MCIxkWjQs35gOYa8eF1dLoy3AD9yVVhoNrA8Bc5FnVFJ32Qv7agy
qRY85cXBA6hT/Qzs/wWwp7WrrnZuifaSv/u/Ayi5vX42/bf86PSM2IRNIESoA98A
NFz4U2KGq9s1K2JbkQmnFy8IU0w7CMq6PvNEm/uNjSk6OE1rcCXML+EuX0zmXy8d
PjRbLzC9csSg2CqMtQIDAQABo3sweTAJBgNVHRMEAjAAMCwGCWCGSAGG+EIBDQQf
Fh1PcGVuU1NMIEdlbmVyYXRlZCBDZXJ0aWZpY2F0ZTAdBgNVHQ4EFgQUdjuOczdG
hUpYzH0UXqKrOleT8GkwHwYDVR0jBBgwFoAU+SKWC49cM5sCodv89AFin3pkS0Yw
DQYJKoZIhvcNAQEFBQADgYEAKYyWlDIStjjbn/ZzVScKR174I8whTbdqrX/vp9dr
2hMv5m4F+aswX4Jr58WneKg2LvRaL6xEhoL7OAQ6aB/7xVSpDjIrrBLZd513NAam
X6bOPYJ6IH7Vw9ClFY3AlfzsNlgRMXno7rySKKzhg24kusNwKDH2yCphZy4BgjMn
y6A=
MIIDvTCCAqWgAwIBAgIJAOeW0ejPrHimMA0GCSqGSIb3DQEBCwUAMHUxCzAJBgNV
BAYTAlVTMQ0wCwYDVQQIDARub25lMQ0wCwYDVQQHDARub25lMQ0wCwYDVQQKDARu
b25lMQ0wCwYDVQQLDARub25lMRUwEwYDVQQDDAxpMnAubW9vby5jb20xEzARBgkq
hkiG9w0BCQEWBG5vbmUwHhcNMTUwMjA4MTczMzA5WhcNMTkwMzE5MTczMzA5WjB1
MQswCQYDVQQGEwJVUzENMAsGA1UECAwEbm9uZTENMAsGA1UEBwwEbm9uZTENMAsG
A1UECgwEbm9uZTENMAsGA1UECwwEbm9uZTEVMBMGA1UEAwwMaTJwLm1vb28uY29t
MRMwEQYJKoZIhvcNAQkBFgRub25lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAqxej7oRl9GOb8benIBCENrJXoow1iWhI9M+2nU0SaonrCDql5M2YMlwd
HzYUWtFbRjz2NinjB0fgFq9cfzHfr1Sc8k/OeGg1jvNfqt8wWo9tryQNjiHtDQUZ
6lQ5T13I+lj0CBasowgbApKQfrYjvaeuTaVYTfP8IVA60hoUQ+sy9JN+Unsx3/0Y
PLLd98+bT27qYuBNRB1g/ifUTd9Wosj2PevGBlCxYDaUjmCG4Q8kcQr87KvM6RTu
3AV61s/Wyy1j2YemlGG/ZhJ44YnlVMSu1vTjt9HInVf3lRRx/+RzbQO3lqeVC8LC
Bq3KbSlfJVx4vHslfHwBFw9A4rmD1QIDAQABo1AwTjAdBgNVHQ4EFgQUsSUvX0ED
yivB67iksVwZ+b8vLtQwHwYDVR0jBBgwFoAUsSUvX0EDyivB67iksVwZ+b8vLtQw
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAde4wts7Q8TylFEc38ftJ
2f285fFIR7P1SSbBcHPK2eBwLEg0zJyFrCeiHuEpPrn+d5GqL2zOskjfcESGmDBT
aFajj8jPBJj/AmpkdWJG6a1YKro5tu9wrlenGwHOHu2/Cl0IJvafxrOs2x4G+2Nl
5Hcw/FIy8mK7eIch4pACfi0zNMZ6KMCKfX9bxPrQo78WdBfVjbrIBlgyOQJ5NJEF
JlWvS7Butv7eERi4I2huN5VRJSCFzjbuO+tjP3I8IB6WgdBmTeqq8ObtXRgahBuD
ZmkvqVSfIzK5JN4GjO8FOdCBomuwm9A92kgmAptwQwAHM9qCDJpH8L07/7poxlGb
iA==
-----END CERTIFICATE-----

View File

@ -1,23 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIDvTCCAqWgAwIBAgIJAOeW0ejPrHimMA0GCSqGSIb3DQEBCwUAMHUxCzAJBgNV
BAYTAlVTMQ0wCwYDVQQIDARub25lMQ0wCwYDVQQHDARub25lMQ0wCwYDVQQKDARu
b25lMQ0wCwYDVQQLDARub25lMRUwEwYDVQQDDAxpMnAubW9vby5jb20xEzARBgkq
hkiG9w0BCQEWBG5vbmUwHhcNMTUwMjA4MTczMzA5WhcNMTkwMzE5MTczMzA5WjB1
MQswCQYDVQQGEwJVUzENMAsGA1UECAwEbm9uZTENMAsGA1UEBwwEbm9uZTENMAsG
A1UECgwEbm9uZTENMAsGA1UECwwEbm9uZTEVMBMGA1UEAwwMaTJwLm1vb28uY29t
MRMwEQYJKoZIhvcNAQkBFgRub25lMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
CgKCAQEAqxej7oRl9GOb8benIBCENrJXoow1iWhI9M+2nU0SaonrCDql5M2YMlwd
HzYUWtFbRjz2NinjB0fgFq9cfzHfr1Sc8k/OeGg1jvNfqt8wWo9tryQNjiHtDQUZ
6lQ5T13I+lj0CBasowgbApKQfrYjvaeuTaVYTfP8IVA60hoUQ+sy9JN+Unsx3/0Y
PLLd98+bT27qYuBNRB1g/ifUTd9Wosj2PevGBlCxYDaUjmCG4Q8kcQr87KvM6RTu
3AV61s/Wyy1j2YemlGG/ZhJ44YnlVMSu1vTjt9HInVf3lRRx/+RzbQO3lqeVC8LC
Bq3KbSlfJVx4vHslfHwBFw9A4rmD1QIDAQABo1AwTjAdBgNVHQ4EFgQUsSUvX0ED
yivB67iksVwZ+b8vLtQwHwYDVR0jBBgwFoAUsSUvX0EDyivB67iksVwZ+b8vLtQw
DAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAde4wts7Q8TylFEc38ftJ
2f285fFIR7P1SSbBcHPK2eBwLEg0zJyFrCeiHuEpPrn+d5GqL2zOskjfcESGmDBT
aFajj8jPBJj/AmpkdWJG6a1YKro5tu9wrlenGwHOHu2/Cl0IJvafxrOs2x4G+2Nl
5Hcw/FIy8mK7eIch4pACfi0zNMZ6KMCKfX9bxPrQo78WdBfVjbrIBlgyOQJ5NJEF
JlWvS7Butv7eERi4I2huN5VRJSCFzjbuO+tjP3I8IB6WgdBmTeqq8ObtXRgahBuD
ZmkvqVSfIzK5JN4GjO8FOdCBomuwm9A92kgmAptwQwAHM9qCDJpH8L07/7poxlGb
iA==
-----END CERTIFICATE-----

View File

@ -95,3 +95,4 @@ certificates/ssl/i2p-netdb.innovatio.no.crt
certificates/ssl/jp.reseed.i2p2.no.crt
certificates/ssl/ieb9oopo.mooo.com2.crt
certificates/ssl/netdb.i2p2.no2.crt
certificates/ssl/i2p.mooo.com2.crt

View File

@ -49,7 +49,7 @@ public class DataMessage extends FastI2NPMessageImpl {
long size = DataHelper.fromLong(data, curIndex, 4);
curIndex += 4;
if (size > MAX_SIZE)
throw new I2NPMessageException("wtf, size=" + size);
throw new I2NPMessageException("too large msg, size=" + size);
_data = new byte[(int)size];
System.arraycopy(data, curIndex, _data, 0, (int)size);
}

View File

@ -357,8 +357,8 @@ public class DeliveryInstructions extends DataStructureImpl {
int offset = 0;
offset += getAdditionalInfo(rv, offset);
if (offset != additionalSize)
//_log.log(Log.CRIT, "wtf, additionalSize = " + additionalSize + ", offset = " + offset);
throw new IllegalStateException("wtf, additionalSize = " + additionalSize + ", offset = " + offset);
//_log.log(Log.CRIT, "size mismatch, additionalSize = " + additionalSize + ", offset = " + offset);
throw new IllegalStateException("size mismatch, additionalSize = " + additionalSize + ", offset = " + offset);
return rv;
}

View File

@ -164,7 +164,7 @@ public class I2NPMessageReader {
_listener.disconnected(I2NPMessageReader.this);
cancelRunner();
} catch (Exception e) {
_log.log(Log.CRIT, "wtf, error reading", e);
_log.log(Log.CRIT, "error reading msg!", e);
_listener.readError(I2NPMessageReader.this, e);
_listener.disconnected(I2NPMessageReader.this);
cancelRunner();

View File

@ -62,7 +62,7 @@ public class TunnelGatewayMessage extends FastI2NPMessageImpl {
if (_msg != null)
throw new IllegalStateException();
if (msg == null)
throw new IllegalArgumentException("wtf, dont set me to null");
throw new IllegalArgumentException("dont set me to null!");
_msg = msg;
}
@ -137,7 +137,7 @@ public class TunnelGatewayMessage extends FastI2NPMessageImpl {
//handler.readMessage(data, curIndex);
//_msg = handler.lastRead();
//if (_msg == null)
// throw new I2NPMessageException("wtf, message read has no payload?");
// throw new I2NPMessageException("impossible? message read has no payload?!");
// NEW WAY save lots of effort at the IBGW by reading as an UnknownI2NPMessage instead
// This will save a lot of object churn and processing,

View File

@ -48,7 +48,7 @@ public class UnknownI2NPMessage extends FastI2NPMessageImpl {
throw new IllegalStateException();
if (type != _type) throw new I2NPMessageException("Message type is incorrect for this message");
if (dataSize > MAX_SIZE)
throw new I2NPMessageException("wtf, size=" + dataSize);
throw new I2NPMessageException("size mismatch, too big, size=" + dataSize);
_data = new byte[dataSize];
System.arraycopy(data, offset, _data, 0, dataSize);
}

View File

@ -308,7 +308,7 @@ public class RouterInfo extends DatabaseEntry {
*/
protected byte[] getBytes() throws DataFormatException {
if (_byteified != null) return _byteified;
if (_identity == null) throw new DataFormatException("Router identity isn't set? wtf!");
if (_identity == null) throw new DataFormatException("Router identity isn't set?!");
//long before = Clock.getInstance().now();
ByteArrayOutputStream out = new ByteArrayOutputStream(2*1024);

View File

@ -162,11 +162,11 @@ public class Banlist {
*/
public boolean banlistRouter(Hash peer, String reason, String reasonCode, String transport, long expireOn) {
if (peer == null) {
_log.error("wtf, why did we try to banlist null?", new Exception("banfaced"));
_log.error("why did we try to banlist null?", new Exception("banfaced"));
return false;
}
if (peer.equals(_context.routerHash())) {
_log.error("wtf, why did we try to banlist ourselves?", new Exception("banfaced"));
_log.error("why did we try to banlist ourselves?", new Exception("banfaced"));
return false;
}
boolean wasAlready = false;

View File

@ -591,7 +591,7 @@ public class JobQueue {
} catch (Throwable t) {
_context.clock().removeUpdateListener(this);
if (_log.shouldLog(Log.ERROR))
_log.error("wtf, pumper killed", t);
_log.error("pumper killed?!", t);
}
}

View File

@ -117,7 +117,7 @@ class JobQueueRunner extends I2PThread {
//if ( (jobNum % 10) == 0)
// System.gc();
} catch (Throwable t) {
_log.log(Log.CRIT, "WTF, error running?", t);
_log.log(Log.CRIT, "error running?", t);
}
}
//_state = 16;

View File

@ -852,7 +852,7 @@ public class Router implements RouterClock.ClockShiftListener {
addCapabilities(ri);
SigningPrivateKey key = _context.keyManager().getSigningPrivateKey();
if (key == null) {
_log.log(Log.CRIT, "Internal error - signing private key not known? wtf");
_log.log(Log.CRIT, "Internal error - signing private key not known? Impossible?");
return;
}
ri.sign(key);

View File

@ -18,7 +18,7 @@ public class RouterVersion {
/** deprecated */
public final static String ID = "Monotone";
public final static String VERSION = CoreVersion.VERSION;
public final static long BUILD = 2;
public final static long BUILD = 5;
/** for example "-test" */
public final static String EXTRA = "";

View File

@ -122,7 +122,7 @@ public class VMCommSystem extends CommSystemFacade {
_ctx.inNetMessagePool().add(msg, null, _from);
} catch (Exception e) {
_log.error("wtf, error reading/formatting a VM message?", e);
_log.error("Error reading/formatting a VM message? Something is not right...", e);
}
}
public String getName() { return "Receive Message"; }

View File

@ -43,22 +43,40 @@ public class SendMessageDirectJob extends JobImpl {
private boolean _sent;
private long _searchOn;
/**
* @param toPeer may be ourselves
*/
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, int timeoutMs, int priority) {
this(ctx, message, toPeer, null, null, null, null, timeoutMs, priority);
}
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, ReplyJob onSuccess, Job onFail, MessageSelector selector, int timeoutMs, int priority) {
/**
* @param toPeer may be ourselves
* @param onSuccess may be null
* @param onFail may be null
* @param selector may be null
*/
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, ReplyJob onSuccess,
Job onFail, MessageSelector selector, int timeoutMs, int priority) {
this(ctx, message, toPeer, null, onSuccess, onFail, selector, timeoutMs, priority);
}
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, Job onSend, ReplyJob onSuccess, Job onFail, MessageSelector selector, int timeoutMs, int priority) {
/**
* @param toPeer may be ourselves
* @param onSend may be null
* @param onSuccess may be null
* @param onFail may be null
* @param selector may be null
*/
public SendMessageDirectJob(RouterContext ctx, I2NPMessage message, Hash toPeer, Job onSend, ReplyJob onSuccess,
Job onFail, MessageSelector selector, int timeoutMs, int priority) {
super(ctx);
_log = getContext().logManager().getLog(SendMessageDirectJob.class);
_message = message;
_targetHash = toPeer;
if (timeoutMs < 10*1000) {
if (_log.shouldLog(Log.WARN))
_log.warn("Very little time given [" + timeoutMs + "], resetting to 5s", new Exception("stingy bastard"));
_log.warn("Very little time given [" + timeoutMs + "], resetting to 5s", new Exception("stingy caller!"));
_expiration = ctx.clock().now() + 10*1000;
} else {
_expiration = timeoutMs + ctx.clock().now();

View File

@ -28,7 +28,7 @@ public class FloodfillDatabaseStoreMessageHandler implements HandlerJobBuilder {
_context = context;
_facade = facade;
// following are for HFDSMJ
context.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storeHandled", "How many netDb store messages have we handled?", "NetworkDatabase", new long[] { 60*1000, 60*60*1000l });
context.statManager().createRateStat("netDb.storeLeaseSetHandled", "How many leaseSet store messages have we handled?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storeRouterInfoHandled", "How many routerInfo store messages have we handled?", "NetworkDatabase", new long[] { 60*60*1000l });
context.statManager().createRateStat("netDb.storeRecvTime", "How long it takes to handle the local store part of a dbStore?", "NetworkDatabase", new long[] { 60*60*1000l });

View File

@ -6,6 +6,7 @@ import net.i2p.crypto.SigType;
import net.i2p.data.Hash;
import net.i2p.data.router.RouterAddress;
import net.i2p.data.router.RouterInfo;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
@ -55,13 +56,27 @@ class FloodfillMonitorJob extends JobImpl {
} else {
getContext().router().eventLog().addEvent(EventLog.NOT_FLOODFILL);
}
getContext().router().rebuildRouterInfo();
getContext().router().rebuildRouterInfo(true);
Job routerInfoFlood = new FloodfillRouterInfoFloodJob(getContext(), _facade);
if(getContext().router().getUptime() < 5*60*1000) {
// Needed to prevent race if router.floodfillParticipant=true (not auto)
routerInfoFlood.getTiming().setStartAfter(getContext().clock().now() + 5*60*1000);
getContext().jobQueue().addJob(routerInfoFlood);
if(_log.shouldLog(Log.DEBUG)) {
_log.logAlways(Log.DEBUG, "Deferring our FloodfillRouterInfoFloodJob run because of low uptime.");
}
} else {
routerInfoFlood.runJob();
if(_log.shouldLog(Log.DEBUG)) {
_log.logAlways(Log.DEBUG, "Running FloodfillRouterInfoFloodJob");
}
}
}
if (_log.shouldLog(Log.INFO))
_log.info("Should we be floodfill? " + ff);
int delay = (REQUEUE_DELAY / 2) + getContext().random().nextInt(REQUEUE_DELAY);
// there's a lot of eligible non-floodfills, keep them from all jumping in at once
// To do: somehow assess the size of the network to make this adaptive?
// TODO: somehow assess the size of the network to make this adaptive?
if (!ff)
delay *= 4; // this was 7, reduced for moar FFs --zab
requeue(delay);

View File

@ -40,7 +40,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
* Was 7 through release 0.9; 5 for 0.9.1.
* 4 as of 0.9.2; 3 as of 0.9.9
*/
private static final int MAX_TO_FLOOD = 3;
public static final int MAX_TO_FLOOD = 3;
private static final int FLOOD_PRIORITY = OutNetMessage.PRIORITY_NETDB_FLOOD;
private static final int FLOOD_TIMEOUT = 30*1000;
@ -129,7 +129,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacad
*/
@Override
public void publish(RouterInfo localRouterInfo) throws IllegalArgumentException {
if (localRouterInfo == null) throw new IllegalArgumentException("wtf, null localRouterInfo?");
if (localRouterInfo == null) throw new IllegalArgumentException("impossible: null localRouterInfo?");
// should this be after super? why not publish locally?
if (_context.router().isHidden()) return; // DE-nied!
super.publish(localRouterInfo);

View File

@ -0,0 +1,63 @@
package net.i2p.router.networkdb.kademlia;
import java.util.Collections;
import java.util.List;
import net.i2p.data.Hash;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.router.RouterAddress;
import net.i2p.data.router.RouterInfo;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.Router;
import net.i2p.router.RouterContext;
import net.i2p.stat.Rate;
import net.i2p.stat.RateStat;
import net.i2p.util.Log;
/**
 * Job to flood nearby floodfill routers with our RI.
 * Speeds up integration of new ffs. Created for #1195.
 * Also called when opting out of ff to call off the hounds ASAP.
 * Currently floods FNDF.MAX_TO_FLOOD * 2 routers nearest to us.
 *
 */
class FloodfillRouterInfoFloodJob extends JobImpl {
    private final Log _log;
    private final FloodfillNetworkDatabaseFacade _facade;

    /** how many of the floodfills closest to us receive a copy of our RI */
    private static final int FLOOD_PEERS = 2 * FloodfillNetworkDatabaseFacade.MAX_TO_FLOOD;

    public FloodfillRouterInfoFloodJob(RouterContext context, FloodfillNetworkDatabaseFacade facade) {
        super(context);
        _facade = facade;
        _log = context.logManager().getLog(FloodfillRouterInfoFloodJob.class);
    }

    public String getName() { return "Flood our RouterInfo to nearby floodfills"; }

    /**
     * Send a DatabaseStoreMessage containing our current RouterInfo directly
     * to each of the FLOOD_PEERS floodfills closest to our own hash.
     * Peers whose RouterInfo we don't already have locally are skipped
     * (no remote lookup is triggered).
     */
    public void runJob() {
        FloodfillPeerSelector sel = (FloodfillPeerSelector) _facade.getPeerSelector();
        // nearby floodfill participants, excluding ourselves
        List<Hash> peers = sel.selectFloodfillParticipants(getContext().routerHash(), FLOOD_PEERS, null);
        for (Hash peer : peers) {
            // need the target's RI locally to address the OutNetMessage; skip if absent
            RouterInfo nextPeerInfo = getContext().netDb().lookupRouterInfoLocally(peer);
            if (nextPeerInfo == null) {
                continue;
            }
            DatabaseStoreMessage dsm = new DatabaseStoreMessage(getContext());
            dsm.setMessageExpiration(getContext().clock().now() + 10*1000);
            dsm.setEntry(getContext().router().getRouterInfo());
            OutNetMessage outMsg = new OutNetMessage(getContext(), dsm, getContext().clock().now() + 10*1000,
                                                     OutNetMessage.PRIORITY_MY_NETDB_STORE, nextPeerInfo);
            getContext().outNetMessagePool().add(outMsg); // Whoosh!
            // was logAlways(Log.DEBUG, ...) inside a shouldLog(DEBUG) guard, which is
            // contradictory; the guard already ensures DEBUG is enabled, so use debug()
            if (_log.shouldLog(Log.DEBUG)) {
                _log.debug("Sending our RI to: " + nextPeerInfo.getHash());
            }
        }
    }
}

View File

@ -14,14 +14,19 @@ import java.util.Date;
import net.i2p.data.DatabaseEntry;
import net.i2p.data.Hash;
import net.i2p.data.LeaseSet;
import net.i2p.data.TunnelId;
import net.i2p.data.router.RouterAddress;
import net.i2p.data.router.RouterIdentity;
import net.i2p.data.router.RouterInfo;
import net.i2p.data.i2np.DatabaseStoreMessage;
import net.i2p.data.i2np.DeliveryStatusMessage;
import net.i2p.data.i2np.TunnelGatewayMessage;
import net.i2p.router.Job;
import net.i2p.router.JobImpl;
import net.i2p.router.OutNetMessage;
import net.i2p.router.RouterContext;
import net.i2p.router.TunnelInfo;
import net.i2p.router.message.SendMessageDirectJob;
import net.i2p.util.Log;
/**
@ -34,8 +39,15 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
private final RouterIdentity _from;
private Hash _fromHash;
private final FloodfillNetworkDatabaseFacade _facade;
private final static int REPLY_TIMEOUT = 60*1000;
private final static int MESSAGE_PRIORITY = OutNetMessage.PRIORITY_NETDB_REPLY;
public HandleFloodfillDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage, RouterIdentity from, Hash fromHash, FloodfillNetworkDatabaseFacade facade) {
/**
* @param receivedMessage must never have reply token set if it came down a tunnel
*/
public HandleFloodfillDatabaseStoreMessageJob(RouterContext ctx, DatabaseStoreMessage receivedMessage,
RouterIdentity from, Hash fromHash,
FloodfillNetworkDatabaseFacade facade) {
super(ctx);
_log = ctx.logManager().getLog(getClass());
_message = receivedMessage;
@ -136,6 +148,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// somebody has our keys...
if (getContext().routerHash().equals(key)) {
//getContext().statManager().addRateData("netDb.storeLocalRouterInfoAttempt", 1, 0);
// This is initiated by PeerTestJob from another peer
// throw rather than return, so that we send the ack below (prevent easy attack)
dontBlamePeer = true;
throw new IllegalArgumentException("Peer attempted to store our RouterInfo");
@ -170,15 +183,18 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
if (_log.shouldLog(Log.ERROR))
_log.error("Invalid DatabaseStoreMessage data type - " + entry.getType()
+ ": " + _message);
// don't ack or flood
return;
}
long recvEnd = System.currentTimeMillis();
getContext().statManager().addRateData("netDb.storeRecvTime", recvEnd-recvBegin);
// ack even if invalid or unsupported
// ack even if invalid
// in particular, ack our own RI (from PeerTestJob)
// TODO any cases where we shouldn't?
if (_message.getReplyToken() > 0)
sendAck();
sendAck(key);
long ackEnd = System.currentTimeMillis();
if (_from != null)
@ -215,7 +231,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// ERR: see comment in HandleDatabaseLookupMessageJob regarding hidden mode
//else if (!_message.getRouterInfo().isHidden())
long floodEnd = System.currentTimeMillis();
getContext().statManager().addRateData("netDb.storeFloodNew", floodEnd-floodBegin);
getContext().statManager().addRateData("netDb.storeFloodNew", floodEnd-floodBegin, 60*1000);
} else {
// don't flood it *again*
getContext().statManager().addRateData("netDb.storeFloodOld", 1);
@ -223,7 +239,7 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
}
}
private void sendAck() {
private void sendAck(Hash storedKey) {
DeliveryStatusMessage msg = new DeliveryStatusMessage(getContext());
msg.setMessageId(_message.getReplyToken());
// Randomize for a little protection against clock-skew fingerprinting.
@ -231,31 +247,62 @@ public class HandleFloodfillDatabaseStoreMessageJob extends JobImpl {
// TODO just set to 0?
// TODO we have no session to garlic wrap this with, needs new message
msg.setArrival(getContext().clock().now() - getContext().random().nextInt(3*1000));
/*
if (FloodfillNetworkDatabaseFacade.floodfillEnabled(getContext())) {
// no need to do anything but send it where they ask
// may be null
TunnelId replyTunnel = _message.getReplyTunnel();
// A store of our own RI, only if we are not FF
DatabaseStoreMessage msg2;
if (getContext().netDb().floodfillEnabled() ||
storedKey.equals(getContext().routerHash())) {
// don't send our RI if the store was our RI (from PeerTestJob)
msg2 = null;
} else {
// we aren't ff, send a go-away message
msg2 = new DatabaseStoreMessage(getContext());
RouterInfo me = getContext().router().getRouterInfo();
msg2.setEntry(me);
if (_log.shouldWarn())
_log.warn("Got a store w/ reply token, but we aren't ff: from: " + _from +
" fromHash: " + _fromHash + " msg: " + _message, new Exception());
}
Hash toPeer = _message.getReplyGateway();
boolean toUs = getContext().routerHash().equals(toPeer);
// to reduce connection congestion, send directly if connected already,
// else through an exploratory tunnel.
if (toUs && replyTunnel != null) {
// if we are the gateway, act as if we received it
TunnelGatewayMessage tgm = new TunnelGatewayMessage(getContext());
tgm.setMessage(msg);
tgm.setTunnelId(_message.getReplyTunnel());
tgm.setTunnelId(replyTunnel);
tgm.setMessageExpiration(msg.getMessageExpiration());
getContext().jobQueue().addJob(new SendMessageDirectJob(getContext(), tgm, _message.getReplyGateway(), 10*1000, 200));
getContext().tunnelDispatcher().dispatch(tgm);
if (msg2 != null) {
TunnelGatewayMessage tgm2 = new TunnelGatewayMessage(getContext());
tgm2.setMessage(msg2);
tgm2.setTunnelId(replyTunnel);
tgm2.setMessageExpiration(msg.getMessageExpiration());
getContext().tunnelDispatcher().dispatch(tgm2);
}
} else if (toUs || getContext().commSystem().isEstablished(toPeer)) {
Job send = new SendMessageDirectJob(getContext(), msg, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
send.runJob();
if (msg2 != null) {
Job send2 = new SendMessageDirectJob(getContext(), msg2, toPeer, REPLY_TIMEOUT, MESSAGE_PRIORITY);
send2.runJob();
}
} else {
*/
TunnelInfo outTunnel = selectOutboundTunnel();
// pick tunnel with endpoint closest to toPeer
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(toPeer);
if (outTunnel == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("No outbound tunnel could be found");
return;
} else {
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0),
_message.getReplyTunnel(), _message.getReplyGateway());
}
//}
}
private TunnelInfo selectOutboundTunnel() {
return getContext().tunnelManager().selectOutboundTunnel();
getContext().tunnelDispatcher().dispatchOutbound(msg, outTunnel.getSendTunnelId(0),
replyTunnel, toPeer);
if (msg2 != null)
getContext().tunnelDispatcher().dispatchOutbound(msg2, outTunnel.getSendTunnelId(0),
replyTunnel, toPeer);
}
}
public String getName() { return "Handle Database Store Message"; }

View File

@ -58,7 +58,7 @@ class IterativeLookupJob extends JobImpl {
continue;
}
if (peer.equals(from)) {
// wtf
// unusual
invalidPeers++;
continue;
}

View File

@ -646,7 +646,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
try {
store(h, localLeaseSet);
} catch (IllegalArgumentException iae) {
_log.error("wtf, locally published leaseSet is not valid?", iae);
_log.error("locally published leaseSet is not valid?", iae);
throw iae;
}
if (!_context.clientManager().shouldPublishLeaseSet(h))

View File

@ -484,7 +484,7 @@ class PersistentDataStore extends TransientDataStore {
// don't overwrite recent netdb RIs with reseed data
return fileDate > _knownDate + (60*60*1000);
} else {
// wtf - prevent injection from reseeding
// safety measure - prevent injection from reseeding
_log.error("Prevented LS overwrite by RI " + _key + " from " + _routerFile);
return false;
}

View File

@ -94,7 +94,7 @@ class SearchJob extends JobImpl {
Job onSuccess, Job onFailure, long timeoutMs, boolean keepStats, boolean isLease) {
super(context);
if ( (key == null) || (key.getData() == null) )
throw new IllegalArgumentException("Search for null key? wtf");
throw new IllegalArgumentException("Search for null key?");
_log = getContext().logManager().getLog(getClass());
_facade = facade;
_state = new SearchState(getContext(), key);
@ -425,7 +425,7 @@ class SearchJob extends JobImpl {
Hash to = router.getIdentity().getHash();
TunnelInfo inTunnel = getContext().tunnelManager().selectInboundExploratoryTunnel(to);
if (inTunnel == null) {
_log.warn("No tunnels to get search replies through! wtf!");
_log.warn("No tunnels to get search replies through!");
getContext().jobQueue().addJob(new FailedJob(getContext(), router));
return;
}
@ -436,7 +436,7 @@ class SearchJob extends JobImpl {
//RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0));
//if (inGateway == null) {
// _log.error("We can't find the gateway to our inbound tunnel?! wtf");
// _log.error("We can't find the gateway to our inbound tunnel?!");
// getContext().jobQueue().addJob(new FailedJob(getContext(), router));
// return;
//}
@ -448,7 +448,7 @@ class SearchJob extends JobImpl {
TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(to);
if (outTunnel == null) {
_log.warn("No tunnels to send search out through! wtf!");
_log.warn("No tunnels to send search out through! Impossible?");
getContext().jobQueue().addJob(new FailedJob(getContext(), router));
return;
}

View File

@ -101,7 +101,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
_job.replyFound((DatabaseSearchReplyMessage)message, _peer);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error(getJobId() + ": WTF, reply job matched a strange message: " + message);
_log.error(getJobId() + ": What?! Reply job matched a strange message: " + message);
return;
}

View File

@ -42,7 +42,7 @@ class SingleLookupJob extends JobImpl {
Hash peer = _dsrm.getReply(i);
if (peer.equals(getContext().routerHash())) // us
continue;
if (peer.equals(from)) // wtf
if (peer.equals(from)) // unusual?
continue;
RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
if (ri == null)

View File

@ -173,7 +173,7 @@ class CapacityCalculator {
case 30*60*1000: return .3;
case 60*60*1000: return .2;
case 24*60*60*1000: return .1;
default: throw new IllegalArgumentException("wtf, period [" + period + "]???");
default: throw new IllegalArgumentException("undefined period passed, period [" + period + "]???");
}
}
}

View File

@ -24,7 +24,8 @@ import net.i2p.util.Log;
* selection to the peer manager and tests the peer by sending it a useless
* database store message
*
* TODO - What's the point? Disable this? See also notes in PeerManager.selectPeers()
* TODO - What's the point? Disable this? See also notes in PeerManager.selectPeers().
* TODO - Use something besides sending the peer's RI to itself?
*/
public class PeerTestJob extends JobImpl {
private final Log _log;
@ -82,6 +83,7 @@ public class PeerTestJob extends JobImpl {
/**
* Retrieve a group of 0 or more peers that we want to test.
* Returned list will not include ourselves.
*
* @return set of RouterInfo structures
*/
@ -110,12 +112,13 @@ public class PeerTestJob extends JobImpl {
/**
* Fire off the necessary jobs and messages to test the given peer
*
* The message is a store of the peer's RI to itself,
* with a reply token.
*/
private void testPeer(RouterInfo peer) {
TunnelInfo inTunnel = getInboundTunnelId();
if (inTunnel == null) {
_log.warn("No tunnels to get peer test replies through! wtf!");
_log.warn("No tunnels to get peer test replies through!");
return;
}
TunnelId inTunnelId = inTunnel.getReceiveTunnelId(0);
@ -123,19 +126,19 @@ public class PeerTestJob extends JobImpl {
RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0));
if (inGateway == null) {
if (_log.shouldLog(Log.WARN))
_log.warn("We can't find the gateway to our inbound tunnel?! wtf");
_log.warn("We can't find the gateway to our inbound tunnel?! Impossible?");
return;
}
int timeoutMs = getTestTimeout();
long expiration = getContext().clock().now() + timeoutMs;
long nonce = getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE);
long nonce = 1 + getContext().random().nextLong(I2NPMessage.MAX_ID_VALUE - 1);
DatabaseStoreMessage msg = buildMessage(peer, inTunnelId, inGateway.getIdentity().getHash(), nonce, expiration);
TunnelInfo outTunnel = getOutboundTunnelId();
if (outTunnel == null) {
_log.warn("No tunnels to send search out through! wtf!");
_log.warn("No tunnels to send search out through! Something is wrong...");
return;
}
@ -172,7 +175,9 @@ public class PeerTestJob extends JobImpl {
}
/**
* Build a message to test the peer with
* Build a message to test the peer with.
* The message is a store of the peer's RI to itself,
* with a reply token.
*/
private DatabaseStoreMessage buildMessage(RouterInfo peer, TunnelId replyTunnel, Hash replyGateway, long nonce, long expiration) {
DatabaseStoreMessage msg = new DatabaseStoreMessage(getContext());

View File

@ -83,7 +83,7 @@ class LoadRouterInfoJob extends JobImpl {
try {
// if we have a routerinfo but no keys, things go bad in a hurry:
// CRIT ...rkdb.PublishLocalRouterInfoJob: Internal error - signing private key not known? rescheduling publish for 30s
// CRIT net.i2p.router.Router : Internal error - signing private key not known? wtf
// CRIT net.i2p.router.Router : Internal error - signing private key not known? Impossible?
// CRIT ...sport.udp.EstablishmentManager: Error in the establisher java.lang.NullPointerException
// at net.i2p.router.transport.udp.PacketBuilder.buildSessionConfirmedPacket(PacketBuilder.java:574)
// so pretend the RI isn't there if there is no keyfile

View File

@ -55,6 +55,7 @@ class NtpClient {
/** difference between the unix epoch and jan 1 1900 (NTP uses that) */
private final static double SECONDS_1900_TO_EPOCH = 2208988800.0;
private final static int NTP_PORT = 123;
private static final int DEFAULT_TIMEOUT = 10*1000;
/**
* Query the ntp servers, returning the current time from first one we find
@ -84,7 +85,7 @@ class NtpClient {
* @throws IllegalArgumentException if none of the servers are reachable
* @since 0.7.12
*/
public static long[] currentTimeAndStratum(String serverNames[]) {
public static long[] currentTimeAndStratum(String serverNames[], int perServerTimeout) {
if (serverNames == null)
throw new IllegalArgumentException("No NTP servers specified");
ArrayList<String> names = new ArrayList<String>(serverNames.length);
@ -92,7 +93,7 @@ class NtpClient {
names.add(serverNames[i]);
Collections.shuffle(names);
for (int i = 0; i < names.size(); i++) {
long[] rv = currentTimeAndStratum(names.get(i));
long[] rv = currentTimeAndStratum(names.get(i), perServerTimeout);
if (rv != null && rv[0] > 0)
return rv;
}
@@ -105,7 +106,7 @@
* @return milliseconds since january 1, 1970 (UTC), or -1 on error
*/
public static long currentTime(String serverName) {
long[] la = currentTimeAndStratum(serverName);
long[] la = currentTimeAndStratum(serverName, DEFAULT_TIMEOUT);
if (la != null)
return la[0];
return -1;
@@ -116,7 +117,7 @@
* @return time in rv[0] and stratum in rv[1], or null for error
* @since 0.7.12
*/
private static long[] currentTimeAndStratum(String serverName) {
private static long[] currentTimeAndStratum(String serverName, int timeout) {
DatagramSocket socket = null;
try {
// Send request
@@ -135,7 +136,7 @@
// Get response
packet = new DatagramPacket(buf, buf.length);
socket.setSoTimeout(10*1000);
socket.setSoTimeout(timeout);
socket.receive(packet);
// Immediately record the incoming timestamp

View File

@@ -43,6 +43,8 @@ public class RouterTimestamper extends Timestamper {
/** how many times do we have to query if we are changing the clock? */
private static final int DEFAULT_CONCURRING_SERVERS = 3;
private static final int MAX_CONSECUTIVE_FAILS = 10;
private static final int DEFAULT_TIMEOUT = 10*1000;
private static final int SHORT_TIMEOUT = 5*1000;
public static final String PROP_QUERY_FREQUENCY = "time.queryFrequencyMs";
public static final String PROP_SERVER_LIST = "time.sntpServerList";
@@ -177,7 +179,7 @@ public class RouterTimestamper extends Timestamper {
if (_log != null && _log.shouldDebug())
_log.debug("Querying servers " + servers);
try {
lastFailed = !queryTime(servers.toArray(new String[servers.size()]));
lastFailed = !queryTime(servers.toArray(new String[servers.size()]), SHORT_TIMEOUT);
} catch (IllegalArgumentException iae) {
if (!lastFailed && _log != null && _log.shouldWarn())
_log.warn("Unable to reach any regional NTP servers: " + servers);
@@ -192,7 +194,7 @@
if (_log != null && _log.shouldDebug())
_log.debug("Querying servers " + _servers);
try {
lastFailed = !queryTime(_servers.toArray(new String[_servers.size()]));
lastFailed = !queryTime(_servers.toArray(new String[_servers.size()]), DEFAULT_TIMEOUT);
} catch (IllegalArgumentException iae) {
lastFailed = true;
}
@@ -259,18 +261,18 @@
/**
* True if the time was queried successfully, false if it couldn't be
*/
private boolean queryTime(String serverList[]) throws IllegalArgumentException {
private boolean queryTime(String serverList[], int perServerTimeout) throws IllegalArgumentException {
long found[] = new long[_concurringServers];
long now = -1;
int stratum = -1;
long expectedDelta = 0;
_wellSynced = false;
for (int i = 0; i < _concurringServers; i++) {
if (i > 0) {
// this delays startup when net is disconnected or the timeserver list is bad, don't make it too long
try { Thread.sleep(2*1000); } catch (InterruptedException ie) {}
}
long[] timeAndStratum = NtpClient.currentTimeAndStratum(serverList);
//if (i > 0) {
// // this delays startup when net is disconnected or the timeserver list is bad, don't make it too long
// try { Thread.sleep(2*1000); } catch (InterruptedException ie) {}
//}
long[] timeAndStratum = NtpClient.currentTimeAndStratum(serverList, perServerTimeout);
now = timeAndStratum[0];
stratum = (int) timeAndStratum[1];
long delta = now - _context.clock().now();

View File

@@ -58,7 +58,7 @@ class GetBidsJob extends JobImpl {
Hash us = context.routerHash();
if (to.equals(us)) {
if (log.shouldLog(Log.ERROR))
log.error("wtf, send a message to ourselves? nuh uh. msg = " + msg);
log.error("send a message to ourselves? nuh uh. msg = " + msg);
context.statManager().addRateData("transport.bidFailSelf", msg.getLifetime());
fail(context, msg);
return;

View File

@@ -195,9 +195,9 @@ public class OutboundMessageRegistry {
*/
private void registerPending(OutNetMessage msg, boolean allowEmpty) {
if ( (!allowEmpty) && (msg.getMessage() == null) )
throw new IllegalArgumentException("OutNetMessage doesn't contain an I2NPMessage? wtf");
throw new IllegalArgumentException("OutNetMessage doesn't contain an I2NPMessage? Impossible?");
MessageSelector sel = msg.getReplySelector();
if (sel == null) throw new IllegalArgumentException("No reply selector? wtf");
if (sel == null) throw new IllegalArgumentException("No reply selector? Impossible?");
if (!_activeMessages.add(msg))
return; // dont add dups

View File

@@ -373,9 +373,9 @@ public abstract class TransportImpl implements Transport {
+ "): " + allTime + "ms/" + sendTime + "ms after failing on: "
+ msg.getFailedTransports() + " and succeeding on " + getStyle());
if ( (allTime > 60*1000) && (sendSuccessful) ) {
// WTF!!@#
// VERY slow
if (_log.shouldLog(Log.WARN))
_log.warn("WTF, more than a minute slow? " + msg.getMessageType()
_log.warn("Severe latency? More than a minute slow? " + msg.getMessageType()
+ " of id " + msg.getMessageId() + " (send begin on "
+ new Date(msg.getSendBegin()) + " / created on "
+ new Date(msg.getCreated()) + "): " + msg);
@@ -497,7 +497,7 @@ public abstract class TransportImpl implements Transport {
_listener.messageReceived(inMsg, remoteIdent, remoteIdentHash);
} else {
if (_log.shouldLog(Log.ERROR))
_log.error("WTF! Null listener! this = " + toString(), new Exception("Null listener"));
_log.error("Null listener! this = " + toString(), new Exception("Null listener"));
}
}

View File

@@ -530,7 +530,7 @@ public class TransportManager implements TransportEventListener {
if (msg == null)
throw new IllegalArgumentException("Null message? no bidding on a null outNetMessage!");
if (_context.router().getRouterInfo().equals(msg.getTarget()))
throw new IllegalArgumentException("WTF, bids for a message bound to ourselves?");
throw new IllegalArgumentException("Bids for a message bound to ourselves?");
List<TransportBid> rv = new ArrayList<TransportBid>(_transports.size());
Set<String> failedTransports = msg.getFailedTransports();

View File

@@ -100,7 +100,7 @@ class NTCPSendFinisher {
// appx 0.1 ms
//_context.statManager().addRateData("ntcp.sendFinishTime", _context.clock().now() - _queued, 0);
} catch (Throwable t) {
_log.log(Log.CRIT, " wtf, afterSend borked", t);
_log.log(Log.CRIT, " afterSend broken?", t);
}
}
}

View File

@@ -168,7 +168,7 @@ class ACKSender implements Runnable {
if (wanted < 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("wtf, why are we acking something they dont want? remaining=" + remaining + ", peer=" + peer + ", bitfields=" + ackBitfields);
_log.warn("why are we acking something they dont want? remaining=" + remaining + ", peer=" + peer + ", bitfields=" + ackBitfields);
continue;
}

View File

@@ -178,7 +178,7 @@ class OutboundMessageFragments {
public void add(OutboundMessageState state) {
PeerState peer = state.getPeer();
if (peer == null)
throw new RuntimeException("wtf, null peer for " + state);
throw new RuntimeException("null peer for " + state);
peer.add(state);
add(peer);
//_context.statManager().addRateData("udp.outboundActiveCount", active, 0);

View File

@@ -370,7 +370,7 @@ class BatchedPreprocessor extends TrivialPreprocessor {
if (offset <= 0) {
StringBuilder buf = new StringBuilder(128);
buf.append("wtf, written offset is ").append(offset);
buf.append("uh? written offset is ").append(offset);
buf.append(" for ").append(startAt).append(" through ").append(sendThrough);
for (int i = startAt; i <= sendThrough; i++) {
buf.append(" ").append(pending.get(i).toString());

View File

@@ -164,7 +164,7 @@ class FragmentHandler {
if (_log.shouldLog(Log.ERROR))
_log.error("Corrupt fragment received: offset = " + offset, e);
_context.statManager().addRateData("tunnel.corruptMessage", 1, 1);
// java.lang.IllegalStateException: wtf, don't get the completed size when we're not complete - null fragment i=0 of 1
// java.lang.IllegalStateException: don't get the completed size when we're not complete - null fragment i=0 of 1
// at net.i2p.router.tunnel.FragmentedMessage.getCompleteSize(FragmentedMessage.java:194)
// at net.i2p.router.tunnel.FragmentedMessage.toByteArray(FragmentedMessage.java:223)
// at net.i2p.router.tunnel.FragmentHandler.receiveComplete(FragmentHandler.java:380)

View File

@@ -164,7 +164,7 @@ class FragmentedMessage {
}
public int getCompleteSize() {
if (!_lastReceived)
throw new IllegalStateException("wtf, don't get the completed size when we're not complete");
throw new IllegalStateException("don't get the completed size when we're not complete!");
if (_releasedAfter > 0) {
RuntimeException e = new RuntimeException("use after free in FragmentedMessage");
_log.error("FM completeSize()", e);
@@ -175,7 +175,7 @@
ByteArray ba = _fragments[i];
// NPE seen here, root cause unknown
if (ba == null)
throw new IllegalStateException("wtf, don't get the completed size when we're not complete - null fragment i=" + i + " of " + _highFragmentNum);
throw new IllegalStateException("don't get the completed size when we're not complete! - null fragment i=" + i + " of " + _highFragmentNum);
size += ba.getValid();
}
return size;

View File

@@ -20,6 +20,6 @@ class InboundGatewayProcessor extends HopProcessor {
public void process(byte orig[], int offset, int length) {
boolean ok = super.process(orig, offset, length, null);
if (!ok)
throw new RuntimeException("wtf, we are the gateway, how did it fail?");
throw new RuntimeException("we are the gateway, how did it fail?");
}
}

View File

@@ -118,8 +118,8 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
}
return;
} else if (dsm.getReplyToken() != 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("Dropping LS DSM w/ reply token down a tunnel for " + _client + ": " + msg);
_context.statManager().addRateData("tunnel.dropDangerousClientTunnelMessage", 1, type);
_log.error("Dropping LS DSM w/ reply token down a tunnel for " + _client + ": " + msg);
return;
} else {
// allow DSM of our own key (used by FloodfillVerifyStoreJob)
@@ -144,6 +144,33 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
return;
} // switch
} else {
// expl. tunnel
switch (type) {
case DatabaseStoreMessage.MESSAGE_TYPE:
DatabaseStoreMessage dsm = (DatabaseStoreMessage) msg;
if (dsm.getReplyToken() != 0) {
_context.statManager().addRateData("tunnel.dropDangerousExplTunnelMessage", 1, type);
_log.error("Dropping DSM w/ reply token down a expl. tunnel: " + msg);
return;
}
if (dsm.getEntry().getType() == DatabaseEntry.KEY_TYPE_LEASESET)
((LeaseSet)dsm.getEntry()).setReceivedAsReply();
break;
case DatabaseSearchReplyMessage.MESSAGE_TYPE:
case DeliveryStatusMessage.MESSAGE_TYPE:
case GarlicMessage.MESSAGE_TYPE:
case TunnelBuildReplyMessage.MESSAGE_TYPE:
case VariableTunnelBuildReplyMessage.MESSAGE_TYPE:
// these are safe, handled below
break;
default:
_context.statManager().addRateData("tunnel.dropDangerousExplTunnelMessage", 1, type);
_log.error("Dropped dangerous message down expl tunnel: " + msg, new Exception("cause"));
return;
} // switch
} // client != null
if ( (target == null) || ( (tunnel == null) && (_context.routerHash().equals(target) ) ) ) {
@@ -189,7 +216,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
TunnelId outId = out.getSendTunnelId(0);
if (outId == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("wtf, outbound tunnel has no outboundId? " + out
_log.error("strange? outbound tunnel has no outboundId? " + out
+ " failing to distribute " + msg);
return;
}

View File

@@ -211,7 +211,8 @@ public class TunnelDispatcher implements Service {
ctx.statManager().createRequiredRateStat("tunnel.corruptMessage", "Corrupt messages received",
"Tunnels", RATES);
// following are for InboundMessageDistributor
ctx.statManager().createRateStat("tunnel.dropDangerousClientTunnelMessage", "How many tunnel messages come down a client tunnel that we shouldn't expect (lifetime is the 'I2NP type')", "Tunnels", new long[] { 60*60*1000 });
ctx.statManager().createRateStat("tunnel.dropDangerousClientTunnelMessage", "(lifetime is the I2NP type)", "Tunnels", new long[] { 60*60*1000 });
ctx.statManager().createRateStat("tunnel.dropDangerousExplTunnelMessage", "(lifetime is the I2NP type)", "Tunnels", new long[] { 60*60*1000 });
ctx.statManager().createRateStat("tunnel.handleLoadClove", "When do we receive load test cloves", "Tunnels", new long[] { 60*60*1000 });
// following is for PumpedTunnelGateway
ctx.statManager().createRateStat("tunnel.dropGatewayOverflow", "Dropped message at GW, queue full", "Tunnels", new long[] { 60*60*1000 });
@@ -630,7 +631,7 @@ public class TunnelDispatcher implements Service {
* @param targetPeer gateway to the tunnel to receive the message
*/
public void dispatchOutbound(I2NPMessage msg, TunnelId outboundTunnel, TunnelId targetTunnel, Hash targetPeer) {
if (outboundTunnel == null) throw new IllegalArgumentException("wtf, null outbound tunnel?");
if (outboundTunnel == null) throw new IllegalArgumentException("null outbound tunnel?");
long before = _context.clock().now();
TunnelGateway gw = _outboundGateways.get(outboundTunnel);
if (gw != null) {
@@ -677,7 +678,7 @@
//long dispatchTime = _context.clock().now() - before;
//if (dispatchTime > 1000) {
// if (_log.shouldLog(Log.WARN))
// _log.warn("wtf, took " + dispatchTime + " to dispatch " + msg + " out " + outboundTunnel + " in " + gw);
// _log.warn("slow? took " + dispatchTime + " to dispatch " + msg + " out " + outboundTunnel + " in " + gw);
//}
//if (gw instanceof TunnelGatewayZeroHop)
// _context.statManager().addRateData("tunnel.dispatchOutboundZeroHopTime", dispatchTime, dispatchTime);

View File

@@ -352,7 +352,7 @@ class BuildHandler implements Runnable {
default:
_context.statManager().addRateData("tunnel.receiveRejectionCritical", 1);
}
// penalize peer based on their bitchiness level
// penalize peer based on their reported error level
_context.profileManager().tunnelRejected(peer, rtt, howBad);
_context.messageHistory().tunnelParticipantRejected(peer, "peer rejected after " + rtt + " with " + howBad + ": " + cfg.toString());
}

View File

@@ -756,7 +756,7 @@ public class TunnelPool {
TunnelId inId = tunnel.getReceiveTunnelId(0);
Hash gw = tunnel.getPeer(0);
if ( (inId == null) || (gw == null) ) {
_log.error(toString() + ": wtf, tunnel has no inbound gateway/tunnelId? " + tunnel);
_log.error(toString() + ": broken? tunnel has no inbound gateway/tunnelId? " + tunnel);
continue;
}
Lease lease = new Lease();