forked from I2P_Developers/i2p.i2p
Language fixups.
@@ -110,7 +110,7 @@ public class ResettableGZIPInputStream extends InflaterInputStream {
             //if (_lookaheadStream.getEOFReached()) {
             if (inf.finished()) {
                 verifyFooter();
-                inf.reset(); // so it doesn't bitch about missing data...
+                inf.reset(); // so it doesn't complain about missing data...
                 _complete = true;
             }
             return read;
history.txt (109 changed lines)
@@ -1,3 +1,6 @@
+2015-06-08 dg
+ * Language fixes
+
 2015-06-06 str4d
  * newsxml: Don't use XXX for parsing dates on Android

@@ -4347,7 +4350,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    - Use new synchronized change-and-save-config methods
      to eliminate races with ReadConfigJob
  * Tunnels:
-   - When a peer is shitlisted, fail all our tunnels where
+   - When a peer is banlisted, fail all our tunnels where
      that peer is the adjacent hop. In particular this
      will remove outbound tunnels when we can't contact
      the first hop, and enable quicker recovery.
@@ -4866,7 +4869,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel

 2011-09-30 zzz
  * logs.jsp: Add wrapper version
- * Shitlist: Shorten time
+ * Banlist: Shorten time
  * Wrapper: Update armv7 to 3.5.12

 2011-09-30 kytv
@@ -4957,7 +4960,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    tunnels from being expired and causing high CPU usage

 2011-09-08 zzz
- * Blocklist: Include IP in shitlist reason
+ * Blocklist: Include IP in banlist reason
  * Ministreaming: Drop old classes replaced by streaming
    years ago.
  * NTCP: Hopefully fix race NPE, thx devzero
@@ -7129,7 +7132,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
  * Transport clock skews:
    - Store and report UDP clock skews even for large values, so
      a badly skewed local clock will be reported to the console
-   - Don't shitlist for NTCP clock skew if we don't know what time it is
+   - Don't banlist for NTCP clock skew if we don't know what time it is
    - If NTP hasn't worked yet, have NTCP or SSU update the clock one time
    - Include failed clock skew in NTCP skew vector if there aren't many connections
    - Don't include NTCP clock skews for non-established connections
@@ -7491,7 +7494,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
  * addressbook: Move class to net.i2p.addressbook
  * build: Take two test scripts out of the installer
  * i2psnark: Bye TPB
- * Shitlist: Fix bug from two checkins ago, all were forever
+ * Banlist: Fix bug from two checkins ago, all were forever

 2009-11-14 zzz
  * HTTP Proxy:
@@ -7506,7 +7509,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
 2009-11-11 zzz
  * Console: Some colon cleansing
  * FloodfillPeerSelector: Adjustments
- * Shitlist: Move HTML renderer to router console,
+ * Banlist: Move HTML renderer to router console,
    add cause parameter for ease of translation,
    tag all causes

@@ -7854,13 +7857,13 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
  * logs.jsp: Add system encoding
  * Ministreaming: Cleanups, deprecation, move demos out of the lib
  * netdb.jsp: Flags for leases
- * NTCP: Clean up clock skew shitlist message
+ * NTCP: Clean up clock skew banlist message
  * profiles.jsp:
    - Rename the Failing column
    - Reduce the time cutoff again to 90m (was 2h)
  * readme*html: localhost -> 127.0.0.1
  * Router: Don't do some things when we are shutting down
- * Shitlist: Clean up expire message
+ * Banlist: Clean up expire message
  * Stats:
    - Fix BufferedStatsLog so it works at all
    - Don't instantiate BufferedStatsLog unless stats.logFilters
@@ -7926,7 +7929,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
      including our own
    2) It randomly dies after a while
  * Console:
-   - Rename the shitlist and the blocklist
+   - Rename the banlist and the blocklist
    - Try to reduce servlet problems on iframe
    - Select server or client icon for local dests
  * EepHead: New
@@ -8661,7 +8664,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
      spiff up tunnels.jsp and profiles.jsp.
      Existing installs can get files with 'ant updaterWIthGeoIP'
      or in the console docs bundle 'ant consoleDocs'
-   - Use flags for shitlist and peers.jsp too
+   - Use flags for banlist and peers.jsp too
    - Tweak tunnels.jsp to show class letters
    - Hide in-progress details on tunnels.jsp
    - Add a little color to confignav
@@ -9142,7 +9145,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    - Plug in-progress build leak

 2009-02-07 zzz
- * ClientConnectionRunner, Shitlist, TunnelDispatcher:
+ * ClientConnectionRunner, Banlist, TunnelDispatcher:
    Update using concurrent
  * Streaming ConnectionHandler: Bound SYN queue and
    use concurrent to prevent blowup
@@ -9290,13 +9293,13 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    - Floodfills periodically shuffle their KBuckets, and
      FloodfillPeerSelector sorts more keys, so that
      exploration works well
- * Shitlist: Reduce max time to 30m (was 60m)
+ * Banlist: Reduce max time to 30m (was 60m)
  * Streaming:
    - Reduce default initial window size from 12 to 6,
      to account for the MTU increase in the last release
      and try to limit initial packet loss
    - Reduce fast retransmit threshold from 3 to 2
- * Transport: Don't shitlist a peer if we are at our
+ * Transport: Don't banlist a peer if we are at our
    connection limit

 2009-01-03 zzz
@@ -9420,7 +9423,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel

 2008-11-21 zzz
  * Cache DNS and negative DNS for 5m (was 1m and forever)
- * Delay shitlist cleaner at startup
+ * Delay banlist cleaner at startup
  * Strip wrapper properties from client config
  * Define multiple cert type
  * Prohibit negative maxSends in streaming
@@ -9628,7 +9631,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
  * configpeer.jsp: Add blocklist info
  * help.jsp: Add link to German FAQ
  * tunnels.jsp: Fix inactive participating count
- * SearchReplyJob: Don't look up references to shitlisted peers
+ * SearchReplyJob: Don't look up references to banlisted peers
  * TunnelPeerSelector: Avoid a peer for 20s after a reject or timeout

 2008-09-20 zzz
@@ -9691,7 +9694,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
 2008-09-06 zzz
  * EepGet command line: Fix byte counts after a failed resume
  * NTCP: Mark unreachable on outbound connection timeout
- * Shitlist: Fix partial shitlisting (still unused though)
+ * Banlist: Fix partial banlisting (still unused though)
  * Summary Bar: Warn if firewalled and floodfill
  * Throttle: Combine current and last bw measurement,
    reduce default max tunnels to 2500 (was 3000)
@@ -9735,16 +9738,16 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
 2008-08-04 zzz
  * Floodfill Peer Selector:
    - Avoid peers whose netdb is old, or have a recent failed store,
-     or are forever-shitlisted
+     or are forever-banlisted

 2008-07-30 zzz
  * Blocklists:
    - New, disabled by default, except for blocking of
-     forever-shitlisted peers. See source for instructions
+     forever-banlisted peers. See source for instructions
      and file format.
  * Transport - Reject peers from inbound connections:
    - Check IP against blocklist
-   - Check router hash against forever-shitlist, then block IP
+   - Check router hash against forever-banlist, then block IP

 2008-07-16 zzz
  * configpeer.jsp: New
@@ -9776,7 +9779,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    - Change some logging from WARN to INFO
    - Clean up toString()
  * SSU:
-   - Try to pick better introducers by checking shitlist,
+   - Try to pick better introducers by checking banlist,
      wasUnreachable list, failing list, and idle times
    - To keep introducer connections up and valid,
      periodically send a "ping" (a data packet with no data and no acks)
@@ -9863,8 +9866,8 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel

 2008-06-07 zzz
  * NetDb: Tweak some logging on lease problems
- * Shitlist:
-   - Add shitlistForever() and isShitlistedForever(), unused for now
+ * Banlist:
+   - Add banlistForever() and isBanlistedForever(), unused for now
    - Sort the HTML output by router hash
  * netdb.jsp:
    - Sort the lease HTML output by dest hash, local first
@@ -9906,7 +9909,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    (for SAM for example). Defaults to true of course.
  * Logging: Move common WARN output to DEBUG so we can ask users to
    set the default log level to WARN without massive spewage
- * ProfileOrganizer: Restrict !isSelectable() (i.e. shitlisted) peers from the High Capacity tier,
+ * ProfileOrganizer: Restrict !isSelectable() (i.e. banlisted) peers from the High Capacity tier,
    not just the Fast tier, since we don't use them for tunnels anyway
  * SAM: Add some compiler flexibility to two obscure makefiles
  * i2psnark: Change displayed peer idents to match that shown by bytemonsoon
@@ -10069,7 +10072,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
 2008-04-17 zzz
  * Reachability:
    - Track unreachable peers persistently
-     (i.e. separately from shitlist, and not cleared when they contact us)
+     (i.e. separately from banlist, and not cleared when they contact us)
    - Exclude detected unreachable peers from inbound tunnels
    - Exclude detected unreachable peers from selected leases
    - Exclude detected unreachable floodfill peers from lookups
@@ -10077,15 +10080,15 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel

 2008-04-16 zzz
  * SSU/Reachability:
-   - Extend shitlist time from 4-8m to 40-60m
-   - Add some shitlist logging
-   - Don't shitlist twice when unreachable on all transports
+   - Extend banlist time from 4-8m to 40-60m
+   - Add some banlist logging
+   - Don't banlist twice when unreachable on all transports
    - Exclude netDb-listed unreachable peers from inbound tunnels;
      this won't help much since there are very few of these now
    - Remove 10s delay on inbound UDP connections used for the
      0.6.1.10 transition
    - Track and display UDP connection direction on peers.jsp
-   - Show shitlist status in-line on profiles.jsp
+   - Show banlist status in-line on profiles.jsp

 2008-04-15 zzz
  * SSU Reachability/PeerTestManager:
@@ -10183,7 +10186,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel

 2008-03-14 zzz
  * Floodfill Search:
-   - Prefer heard-from, unfailing, unshitlisted floodfill peers
+   - Prefer heard-from, unfailing, unbanlisted floodfill peers

 2008-03-14 zzz
  * ProfileOrganizer:
@@ -10968,8 +10971,8 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    new "identlog.txt" text file in the I2P install directory. For
    debugging purposes, publish the count of how many identities the
    router has cycled through, though not the identities itself.
- * Cleaned up the way the multitransport shitlisting worked, and
-   added per-transport shitlists
+ * Cleaned up the way the multitransport banlisting worked, and
+   added per-transport banlists
  * When dropping a router reference locally, first fire a netDb
    lookup for the entry
  * Take the peer selection filters into account when organizing the
@@ -10999,7 +11002,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    even if those shouldn't exist)

 2006-07-14 jrandom
- * Improve the multitransport shitlisting (thanks Complication!)
+ * Improve the multitransport banlisting (thanks Complication!)
  * Allow routers with a capacity of 16-32KBps to be used in tunnels under
    the default configuration (thanks for the stats Complication!)
  * Properly allow older router references to load on startup
@@ -11021,7 +11024,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    GNU/Classpath based JVMs
  * Adjust the Fortuna PRNG's pooling system to reduce contention on
    refill with a background thread to refill the output buffer
- * Add per-transport support for the shitlist
+ * Add per-transport support for the banlist
  * Add a new async pumped tunnel gateway to reduce tunnel dispatcher
    contention

@@ -11091,7 +11094,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
  * SSU modifications to cut down on unnecessary connection failures

 2006-05-16 jrandom
- * Further shitlist randomizations
+ * Further banlist randomizations
  * Adjust the stats monitored for detecting cpu overload when dropping new
    tunnel requests

@@ -11304,7 +11307,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    picker.
  * Cut down on subsequent streaming lib reset packets transmitted
  * Use a larger MTU more often
- * Allow netDb searches to query shitlisted peers, as the queries are
+ * Allow netDb searches to query banlisted peers, as the queries are
    indirect.
  * Add an option to disable non-floodfill netDb searches (non-floodfill
    searches are used by default, but can be disabled by adding
@@ -11366,7 +11369,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    searches outside the floodfill set)
  * Fix to the SSU IP detection code so we won't use introducers when we
    don't need them (thanks Complication!)
- * Add a brief shitlist to i2psnark so it doesn't keep on trying to reach
+ * Add a brief banlist to i2psnark so it doesn't keep on trying to reach
    peers given to it
  * Don't let netDb searches wander across too many peers
  * Don't use the 1s bandwidth usage in the tunnel participation throttle,
@@ -11925,7 +11928,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    left to the (work in progress) web UI.

 2005-12-14 jrandom
- * Fix to drop peer references when we shitlist people again (thanks zzz!)
+ * Fix to drop peer references when we banlist people again (thanks zzz!)
  * Further I2PSnark fixes to deal with arbitrary torrent info attributes
    (thanks Complication!)

@@ -12024,7 +12027,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    separate jdom or rome, as they're inside syndie.war.

 2005-11-30 jrandom
- * Don't let the TCP transport alone shitlist a peer, since other
+ * Don't let the TCP transport alone banlist a peer, since other
    transports may be working. Also display whether TCP connections are
    inbound or outbound on the peers page.
  * Fixed some substantial bugs in the SSU introducers where we wouldn't
@@ -12092,7 +12095,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    address (read: SSU IP or port). This only offers minimal additional
    protection against trivial attackers, but should provide functional
    improvement for people who have periodic IP changes, since their new
-   router address would not be shitlisted while their old one would be.
+   router address would not be banlisted while their old one would be.
  * Added further infrastructure for restricted route operation, but its use
    is not recommended.

@@ -12240,10 +12243,10 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
  * Improved the bandwidth throtting on tunnel participation, especially for
    low bandwidth peers.
  * Improved failure handling in SSU with proactive reestablishment of
-   failing idle peers, and rather than shitlisting a peer who failed too
+   failing idle peers, and rather than banlisting a peer who failed too
    much, drop the SSU session and allow a new attempt (which, if it fails,
-   will cause a shitlisting)
- * Clarify the cause of the shitlist on the profiles page, and include
+   will cause a banlisting)
+ * Clarify the cause of the banlist on the profiles page, and include
    bandwidth limiter info at the bottom of the peers page.

 2005-10-26 jrandom
@@ -12256,7 +12259,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
 2005-10-25 jrandom
  * Defer netDb searches for newly referenced peers until we actually want
    them
- * Ignore netDb references to peers on our shitlist
+ * Ignore netDb references to peers on our banlist
  * Set the timeout for end to end client messages to the max delay after
    finding the leaseSet, so we don't have as many expired messages floating
    around.
@@ -12471,7 +12474,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    reseeding - useful on OSes that make it hard to create dot files.
    Thanks Complication (and anon)!
  * Fixed the installer version string (thanks Frontier!)
- * Added cleaner rejection of invalid IP addresses, shitlist those who send
+ * Added cleaner rejection of invalid IP addresses, banlist those who send
    us invalid IP addresses, verify again that we are not sending invalid IP
    addresses, and log an error if it happens. (Thanks Complication, ptm,
    and adab!)
@@ -12499,7 +12502,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    instead of k/cwin)
  * Limit the number of inbound SSU sessions being built at once (using
    half of the i2np.udp.maxConcurrentEstablish config prop)
- * Don't shitlist on a message send failure alone (unless there aren't any
+ * Don't banlist on a message send failure alone (unless there aren't any
    common transports).
  * More careful bandwidth bursting

@@ -12636,7 +12639,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
 2005-09-13 jrandom
  * More careful error handling with introductions (thanks dust!)
  * Fix the forceIntroducers checkbox on config.jsp (thanks Complication!)
- * Hide the shitlist on the summary so it doesn't confuse new users.
+ * Hide the banlist on the summary so it doesn't confuse new users.

 2005-09-12 comwiz
  * Migrated the router tests to junit
@@ -12743,8 +12746,8 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
 2005-08-24 jrandom
  * Catch errors with corrupt tunnel messages more gracefully (no need to
    kill the thread and cause an OOM...)
- * Don't skip shitlisted peers for netDb store messages, as they aren't
-   necessarily shitlisted by other people (though they probably are).
+ * Don't skip banlisted peers for netDb store messages, as they aren't
+   necessarily banlisted by other people (though they probably are).
  * Adjust the netDb store per-peer timeout based on each particular peer's
    profile (timeout = 4x their average netDb store response time)
  * Don't republish leaseSets to *failed* peers - send them to peers who
@@ -12787,14 +12790,14 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    netDb.
  * Don't overwrite the status with 'unknown' unless we haven't had a valid
    status in a while.
- * Make sure to avoid shitlisted peers for peer testing.
+ * Make sure to avoid banlisted peers for peer testing.
  * When we get an unknown result to a peer test, try again soon afterwards.
  * When a peer tells us that our address is different from what we expect,
    if we've done a recent peer test with a result of OK, fire off a peer
    test to make sure our IP/port is still valid. If our test is old or the
    result was not OK, accept their suggestion, but queue up a peer test for
    later.
- * Don't try to do a netDb store to a shitlisted peer, and adjust the way
+ * Don't try to do a netDb store to a banlisted peer, and adjust the way
    we monitor netDb store progress (to clear up the high netDb.storePeers
    stat)

@@ -13426,7 +13429,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
  * Fix a fencepost in the tunnel building so that e.g. a variance of
    2 means +/- 2, not +/- 1 (thanks dm!)
  * Avoid an NPE on client disconnect
- * Never select a shitlisted peer to participate in a tunnel
+ * Never select a banlisted peer to participate in a tunnel
  * Have netDb store messages timeout after 10s, not the full 60s (duh)
  * Keep session tags around for a little longer, just in case (grr)
  * Cleaned up some closing event issues on the streaming lib
@@ -14276,7 +14279,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    (thanks Jonva!)

 2004-10-04 jrandom
- * Update the shitlist to reject a peer for an exponentially increasing
+ * Update the banlist to reject a peer for an exponentially increasing
    period of time (with an upper bounds of an hour).
  * Various minor stat and debugging fixes

@@ -14290,7 +14293,7 @@ i2psnark: Fix ConnectionAcceptor not restarting after tunnel
    such as "dbResponseTime" or "tunnelTestResponseTime".

 2004-10-02 jrandom
- * Assure that we quickly fail messages bound for shitlisted peers.
+ * Assure that we quickly fail messages bound for banlisted peers.
  * Address a race on startup where the first peer contacted could hang the
    router (thanks Romster!)
  * Only whine about an intermittent inability to query the time server once
@@ -49,7 +49,7 @@ public class DataMessage extends FastI2NPMessageImpl {
         long size = DataHelper.fromLong(data, curIndex, 4);
         curIndex += 4;
         if (size > MAX_SIZE)
-            throw new I2NPMessageException("wtf, size=" + size);
+            throw new I2NPMessageException("too large msg, size=" + size);
         _data = new byte[(int)size];
         System.arraycopy(data, curIndex, _data, 0, (int)size);
     }
@@ -357,8 +357,8 @@ public class DeliveryInstructions extends DataStructureImpl {
         int offset = 0;
         offset += getAdditionalInfo(rv, offset);
         if (offset != additionalSize)
-            //_log.log(Log.CRIT, "wtf, additionalSize = " + additionalSize + ", offset = " + offset);
-            throw new IllegalStateException("wtf, additionalSize = " + additionalSize + ", offset = " + offset);
+            //_log.log(Log.CRIT, "size mismatch, additionalSize = " + additionalSize + ", offset = " + offset);
+            throw new IllegalStateException("size mismatch, additionalSize = " + additionalSize + ", offset = " + offset);
         return rv;
     }

@@ -164,7 +164,7 @@ public class I2NPMessageReader {
                 _listener.disconnected(I2NPMessageReader.this);
                 cancelRunner();
             } catch (Exception e) {
-                _log.log(Log.CRIT, "wtf, error reading", e);
+                _log.log(Log.CRIT, "error reading msg!", e);
                 _listener.readError(I2NPMessageReader.this, e);
                 _listener.disconnected(I2NPMessageReader.this);
                 cancelRunner();
@@ -62,7 +62,7 @@ public class TunnelGatewayMessage extends FastI2NPMessageImpl {
         if (_msg != null)
             throw new IllegalStateException();
         if (msg == null)
-            throw new IllegalArgumentException("wtf, dont set me to null");
+            throw new IllegalArgumentException("dont set me to null!");
         _msg = msg;
     }

@@ -137,7 +137,7 @@ public class TunnelGatewayMessage extends FastI2NPMessageImpl {
             //handler.readMessage(data, curIndex);
             //_msg = handler.lastRead();
             //if (_msg == null)
-            //    throw new I2NPMessageException("wtf, message read has no payload?");
+            //    throw new I2NPMessageException("impossible? message read has no payload?!");

             // NEW WAY save lots of effort at the IBGW by reading as an UnknownI2NPMessage instead
             // This will save a lot of object churn and processing,
@@ -48,7 +48,7 @@ public class UnknownI2NPMessage extends FastI2NPMessageImpl {
             throw new IllegalStateException();
         if (type != _type) throw new I2NPMessageException("Message type is incorrect for this message");
         if (dataSize > MAX_SIZE)
-            throw new I2NPMessageException("wtf, size=" + dataSize);
+            throw new I2NPMessageException("size mismatch, too big, size=" + dataSize);
         _data = new byte[dataSize];
         System.arraycopy(data, offset, _data, 0, dataSize);
     }
@@ -308,7 +308,7 @@ public class RouterInfo extends DatabaseEntry {
      */
     protected byte[] getBytes() throws DataFormatException {
         if (_byteified != null) return _byteified;
-        if (_identity == null) throw new DataFormatException("Router identity isn't set? wtf!");
+        if (_identity == null) throw new DataFormatException("Router identity isn't set?!");

         //long before = Clock.getInstance().now();
         ByteArrayOutputStream out = new ByteArrayOutputStream(2*1024);
@@ -162,11 +162,11 @@ public class Banlist {
      */
     public boolean banlistRouter(Hash peer, String reason, String reasonCode, String transport, long expireOn) {
         if (peer == null) {
-            _log.error("wtf, why did we try to banlist null?", new Exception("banfaced"));
+            _log.error("why did we try to banlist null?", new Exception("banfaced"));
             return false;
         }
         if (peer.equals(_context.routerHash())) {
-            _log.error("wtf, why did we try to banlist ourselves?", new Exception("banfaced"));
+            _log.error("why did we try to banlist ourselves?", new Exception("banfaced"));
             return false;
         }
         boolean wasAlready = false;
@@ -591,7 +591,7 @@ public class JobQueue {
             } catch (Throwable t) {
                 _context.clock().removeUpdateListener(this);
                 if (_log.shouldLog(Log.ERROR))
-                    _log.error("wtf, pumper killed", t);
+                    _log.error("pumper killed?!", t);
             }
         }

@@ -117,7 +117,7 @@ class JobQueueRunner extends I2PThread {
                 //if ( (jobNum % 10) == 0)
                 //    System.gc();
             } catch (Throwable t) {
-                _log.log(Log.CRIT, "WTF, error running?", t);
+                _log.log(Log.CRIT, "error running?", t);
             }
         }
         //_state = 16;
@@ -852,7 +852,7 @@ public class Router implements RouterClock.ClockShiftListener {
         addCapabilities(ri);
         SigningPrivateKey key = _context.keyManager().getSigningPrivateKey();
         if (key == null) {
-            _log.log(Log.CRIT, "Internal error - signing private key not known? wtf");
+            _log.log(Log.CRIT, "Internal error - signing private key not known? Impossible?");
             return;
         }
         ri.sign(key);
@@ -122,7 +122,7 @@ public class VMCommSystem extends CommSystemFacade {

                 _ctx.inNetMessagePool().add(msg, null, _from);
             } catch (Exception e) {
-                _log.error("wtf, error reading/formatting a VM message?", e);
+                _log.error("Error reading/formatting a VM message? Something is not right...", e);
             }
         }
         public String getName() { return "Receive Message"; }
@@ -58,7 +58,7 @@ public class SendMessageDirectJob extends JobImpl {
         _targetHash = toPeer;
         if (timeoutMs < 10*1000) {
             if (_log.shouldLog(Log.WARN))
-                _log.warn("Very little time given [" + timeoutMs + "], resetting to 5s", new Exception("stingy bastard"));
+                _log.warn("Very little time given [" + timeoutMs + "], resetting to 5s", new Exception("stingy caller!"));
             _expiration = ctx.clock().now() + 10*1000;
         } else {
             _expiration = timeoutMs + ctx.clock().now();
@@ -129,7 +129,7 @@ public class FloodfillNetworkDatabaseFacade extends KademliaNetworkDatabaseFacade {
      */
     @Override
     public void publish(RouterInfo localRouterInfo) throws IllegalArgumentException {
-        if (localRouterInfo == null) throw new IllegalArgumentException("wtf, null localRouterInfo?");
+        if (localRouterInfo == null) throw new IllegalArgumentException("impossible: null localRouterInfo?");
         // should this be after super? why not publish locally?
         if (_context.router().isHidden()) return; // DE-nied!
         super.publish(localRouterInfo);
@@ -58,7 +58,7 @@ class IterativeLookupJob extends JobImpl {
                 continue;
             }
             if (peer.equals(from)) {
-                // wtf
+                // unusual
                 invalidPeers++;
                 continue;
             }
@@ -646,7 +646,7 @@ public class KademliaNetworkDatabaseFacade extends NetworkDatabaseFacade {
         try {
             store(h, localLeaseSet);
         } catch (IllegalArgumentException iae) {
-            _log.error("wtf, locally published leaseSet is not valid?", iae);
+            _log.error("locally published leaseSet is not valid?", iae);
             throw iae;
         }
         if (!_context.clientManager().shouldPublishLeaseSet(h))
@@ -484,7 +484,7 @@ class PersistentDataStore extends TransientDataStore {
                 // don't overwrite recent netdb RIs with reseed data
                 return fileDate > _knownDate + (60*60*1000);
             } else {
-                // wtf - prevent injection from reseeding
+                // safety measure - prevent injection from reseeding
                 _log.error("Prevented LS overwrite by RI " + _key + " from " + _routerFile);
                 return false;
             }
@@ -94,7 +94,7 @@ class SearchJob extends JobImpl {
               Job onSuccess, Job onFailure, long timeoutMs, boolean keepStats, boolean isLease) {
         super(context);
         if ( (key == null) || (key.getData() == null) )
-            throw new IllegalArgumentException("Search for null key? wtf");
+            throw new IllegalArgumentException("Search for null key?");
         _log = getContext().logManager().getLog(getClass());
         _facade = facade;
         _state = new SearchState(getContext(), key);
@@ -425,7 +425,7 @@ class SearchJob extends JobImpl {
         Hash to = router.getIdentity().getHash();
         TunnelInfo inTunnel = getContext().tunnelManager().selectInboundExploratoryTunnel(to);
         if (inTunnel == null) {
-            _log.warn("No tunnels to get search replies through! wtf!");
+            _log.warn("No tunnels to get search replies through!");
             getContext().jobQueue().addJob(new FailedJob(getContext(), router));
             return;
         }
@@ -436,7 +436,7 @@ class SearchJob extends JobImpl {

         //RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0));
         //if (inGateway == null) {
-        //    _log.error("We can't find the gateway to our inbound tunnel?! wtf");
+        //    _log.error("We can't find the gateway to our inbound tunnel?!");
         //    getContext().jobQueue().addJob(new FailedJob(getContext(), router));
         //    return;
         //}
@@ -448,7 +448,7 @@ class SearchJob extends JobImpl {

         TunnelInfo outTunnel = getContext().tunnelManager().selectOutboundExploratoryTunnel(to);
         if (outTunnel == null) {
-            _log.warn("No tunnels to send search out through! wtf!");
+            _log.warn("No tunnels to send search out through! Impossible?");
             getContext().jobQueue().addJob(new FailedJob(getContext(), router));
             return;
         }
@@ -101,7 +101,7 @@ class SearchUpdateReplyFoundJob extends JobImpl implements ReplyJob {
             _job.replyFound((DatabaseSearchReplyMessage)message, _peer);
         } else {
             if (_log.shouldLog(Log.ERROR))
-                _log.error(getJobId() + ": WTF, reply job matched a strange message: " + message);
+                _log.error(getJobId() + ": What?! Reply job matched a strange message: " + message);
             return;
         }

@@ -42,7 +42,7 @@ class SingleLookupJob extends JobImpl {
             Hash peer = _dsrm.getReply(i);
             if (peer.equals(getContext().routerHash())) // us
                 continue;
-            if (peer.equals(from)) // wtf
+            if (peer.equals(from)) // unusual?
                 continue;
             RouterInfo ri = getContext().netDb().lookupRouterInfoLocally(peer);
             if (ri == null)
@@ -173,7 +173,7 @@ class CapacityCalculator {
             case 30*60*1000: return .3;
             case 60*60*1000: return .2;
             case 24*60*60*1000: return .1;
-            default: throw new IllegalArgumentException("wtf, period [" + period + "]???");
+            default: throw new IllegalArgumentException("undefined period passed, period [" + period + "]???");
         }
     }
 }
@@ -115,7 +115,7 @@ public class PeerTestJob extends JobImpl {
     private void testPeer(RouterInfo peer) {
         TunnelInfo inTunnel = getInboundTunnelId();
         if (inTunnel == null) {
-            _log.warn("No tunnels to get peer test replies through! wtf!");
+            _log.warn("No tunnels to get peer test replies through!");
             return;
         }
         TunnelId inTunnelId = inTunnel.getReceiveTunnelId(0);
@@ -123,7 +123,7 @@ public class PeerTestJob extends JobImpl {
         RouterInfo inGateway = getContext().netDb().lookupRouterInfoLocally(inTunnel.getPeer(0));
         if (inGateway == null) {
             if (_log.shouldLog(Log.WARN))
-                _log.warn("We can't find the gateway to our inbound tunnel?! wtf");
+                _log.warn("We can't find the gateway to our inbound tunnel?! Impossible?");
             return;
         }

@@ -135,7 +135,7 @@ public class PeerTestJob extends JobImpl {

         TunnelInfo outTunnel = getOutboundTunnelId();
         if (outTunnel == null) {
-            _log.warn("No tunnels to send search out through! wtf!");
+            _log.warn("No tunnels to send search out through! Something is wrong...");
             return;
         }

@@ -83,7 +83,7 @@ class LoadRouterInfoJob extends JobImpl {
         try {
             // if we have a routerinfo but no keys, things go bad in a hurry:
             // CRIT ...rkdb.PublishLocalRouterInfoJob: Internal error - signing private key not known? rescheduling publish for 30s
-            // CRIT net.i2p.router.Router : Internal error - signing private key not known? wtf
+            // CRIT net.i2p.router.Router : Internal error - signing private key not known? Impossible?
             // CRIT ...sport.udp.EstablishmentManager: Error in the establisher java.lang.NullPointerException
             //   at net.i2p.router.transport.udp.PacketBuilder.buildSessionConfirmedPacket(PacketBuilder.java:574)
             // so pretend the RI isn't there if there is no keyfile
@@ -58,7 +58,7 @@ class GetBidsJob extends JobImpl {
         Hash us = context.routerHash();
         if (to.equals(us)) {
             if (log.shouldLog(Log.ERROR))
-                log.error("wtf, send a message to ourselves? nuh uh. msg = " + msg);
+                log.error("send a message to ourselves? nuh uh. msg = " + msg);
             context.statManager().addRateData("transport.bidFailSelf", msg.getLifetime());
             fail(context, msg);
             return;
@@ -195,9 +195,9 @@ public class OutboundMessageRegistry {
      */
     private void registerPending(OutNetMessage msg, boolean allowEmpty) {
         if ( (!allowEmpty) && (msg.getMessage() == null) )
-            throw new IllegalArgumentException("OutNetMessage doesn't contain an I2NPMessage? wtf");
+            throw new IllegalArgumentException("OutNetMessage doesn't contain an I2NPMessage? Impossible?");
         MessageSelector sel = msg.getReplySelector();
-        if (sel == null) throw new IllegalArgumentException("No reply selector? wtf");
+        if (sel == null) throw new IllegalArgumentException("No reply selector? Impossible?");

         if (!_activeMessages.add(msg))
             return; // dont add dups
@@ -373,9 +373,9 @@ public abstract class TransportImpl implements Transport {
                           + "): " + allTime + "ms/" + sendTime + "ms after failing on: "
                           + msg.getFailedTransports() + " and succeeding on " + getStyle());
             if ( (allTime > 60*1000) && (sendSuccessful) ) {
-                // WTF!!@#
+                // VERY slow
                 if (_log.shouldLog(Log.WARN))
-                    _log.warn("WTF, more than a minute slow? " + msg.getMessageType()
+                    _log.warn("Severe latency? More than a minute slow? " + msg.getMessageType()
                               + " of id " + msg.getMessageId() + " (send begin on "
                               + new Date(msg.getSendBegin()) + " / created on "
                               + new Date(msg.getCreated()) + "): " + msg);
@@ -497,7 +497,7 @@ public abstract class TransportImpl implements Transport {
             _listener.messageReceived(inMsg, remoteIdent, remoteIdentHash);
         } else {
             if (_log.shouldLog(Log.ERROR))
-                _log.error("WTF! Null listener! this = " + toString(), new Exception("Null listener"));
+                _log.error("Null listener! this = " + toString(), new Exception("Null listener"));
         }
     }

@@ -530,7 +530,7 @@ public class TransportManager implements TransportEventListener {
         if (msg == null)
             throw new IllegalArgumentException("Null message? no bidding on a null outNetMessage!");
         if (_context.router().getRouterInfo().equals(msg.getTarget()))
-            throw new IllegalArgumentException("WTF, bids for a message bound to ourselves?");
+            throw new IllegalArgumentException("Bids for a message bound to ourselves?");

         List<TransportBid> rv = new ArrayList<TransportBid>(_transports.size());
         Set<String> failedTransports = msg.getFailedTransports();
@@ -100,7 +100,7 @@ class NTCPSendFinisher {
                 // appx 0.1 ms
                 //_context.statManager().addRateData("ntcp.sendFinishTime", _context.clock().now() - _queued, 0);
             } catch (Throwable t) {
-                _log.log(Log.CRIT, " wtf, afterSend borked", t);
+                _log.log(Log.CRIT, " afterSend broken?", t);
             }
         }
     }
@@ -168,7 +168,7 @@ class ACKSender implements Runnable {

     if (wanted < 0) {
         if (_log.shouldLog(Log.WARN))
-            _log.warn("wtf, why are we acking something they dont want? remaining=" + remaining + ", peer=" + peer + ", bitfields=" + ackBitfields);
+            _log.warn("why are we acking something they dont want? remaining=" + remaining + ", peer=" + peer + ", bitfields=" + ackBitfields);
         continue;
     }

@@ -178,7 +178,7 @@ class OutboundMessageFragments {
 public void add(OutboundMessageState state) {
     PeerState peer = state.getPeer();
     if (peer == null)
-        throw new RuntimeException("wtf, null peer for " + state);
+        throw new RuntimeException("null peer for " + state);
     peer.add(state);
     add(peer);
     //_context.statManager().addRateData("udp.outboundActiveCount", active, 0);
@@ -370,7 +370,7 @@ class BatchedPreprocessor extends TrivialPreprocessor {

 if (offset <= 0) {
     StringBuilder buf = new StringBuilder(128);
-    buf.append("wtf, written offset is ").append(offset);
+    buf.append("uh? written offset is ").append(offset);
     buf.append(" for ").append(startAt).append(" through ").append(sendThrough);
     for (int i = startAt; i <= sendThrough; i++) {
         buf.append(" ").append(pending.get(i).toString());
@@ -164,7 +164,7 @@ class FragmentHandler {
     if (_log.shouldLog(Log.ERROR))
         _log.error("Corrupt fragment received: offset = " + offset, e);
     _context.statManager().addRateData("tunnel.corruptMessage", 1, 1);
-    // java.lang.IllegalStateException: wtf, don't get the completed size when we're not complete - null fragment i=0 of 1
+    // java.lang.IllegalStateException: don't get the completed size when we're not complete - null fragment i=0 of 1
     // at net.i2p.router.tunnel.FragmentedMessage.getCompleteSize(FragmentedMessage.java:194)
     // at net.i2p.router.tunnel.FragmentedMessage.toByteArray(FragmentedMessage.java:223)
     // at net.i2p.router.tunnel.FragmentHandler.receiveComplete(FragmentHandler.java:380)
@@ -164,7 +164,7 @@ class FragmentedMessage {
 }
 public int getCompleteSize() {
     if (!_lastReceived)
-        throw new IllegalStateException("wtf, don't get the completed size when we're not complete");
+        throw new IllegalStateException("don't get the completed size when we're not complete!");
     if (_releasedAfter > 0) {
         RuntimeException e = new RuntimeException("use after free in FragmentedMessage");
         _log.error("FM completeSize()", e);
@@ -175,7 +175,7 @@ class FragmentedMessage {
     ByteArray ba = _fragments[i];
     // NPE seen here, root cause unknown
     if (ba == null)
-        throw new IllegalStateException("wtf, don't get the completed size when we're not complete - null fragment i=" + i + " of " + _highFragmentNum);
+        throw new IllegalStateException("don't get the completed size when we're not complete! - null fragment i=" + i + " of " + _highFragmentNum);
     size += ba.getValid();
 }
 return size;
@@ -20,6 +20,6 @@ class InboundGatewayProcessor extends HopProcessor {
 public void process(byte orig[], int offset, int length) {
     boolean ok = super.process(orig, offset, length, null);
     if (!ok)
-        throw new RuntimeException("wtf, we are the gateway, how did it fail?");
+        throw new RuntimeException("we are the gateway, how did it fail?");
 }
@@ -188,7 +188,7 @@ class InboundMessageDistributor implements GarlicMessageReceiver.CloveReceiver {
 TunnelId outId = out.getSendTunnelId(0);
 if (outId == null) {
     if (_log.shouldLog(Log.ERROR))
-        _log.error("wtf, outbound tunnel has no outboundId? " + out
+        _log.error("strange? outbound tunnel has no outboundId? " + out
             + " failing to distribute " + msg);
     return;
 }
@@ -630,7 +630,7 @@ public class TunnelDispatcher implements Service {
  * @param targetPeer gateway to the tunnel to receive the message
  */
 public void dispatchOutbound(I2NPMessage msg, TunnelId outboundTunnel, TunnelId targetTunnel, Hash targetPeer) {
-    if (outboundTunnel == null) throw new IllegalArgumentException("wtf, null outbound tunnel?");
+    if (outboundTunnel == null) throw new IllegalArgumentException("null outbound tunnel?");
     long before = _context.clock().now();
     TunnelGateway gw = _outboundGateways.get(outboundTunnel);
     if (gw != null) {
@@ -677,7 +677,7 @@ public class TunnelDispatcher implements Service {
 //long dispatchTime = _context.clock().now() - before;
 //if (dispatchTime > 1000) {
 //    if (_log.shouldLog(Log.WARN))
-//        _log.warn("wtf, took " + dispatchTime + " to dispatch " + msg + " out " + outboundTunnel + " in " + gw);
+//        _log.warn("slow? took " + dispatchTime + " to dispatch " + msg + " out " + outboundTunnel + " in " + gw);
 //}
 //if (gw instanceof TunnelGatewayZeroHop)
 //    _context.statManager().addRateData("tunnel.dispatchOutboundZeroHopTime", dispatchTime, dispatchTime);
@@ -352,7 +352,7 @@ class BuildHandler implements Runnable {
         default:
             _context.statManager().addRateData("tunnel.receiveRejectionCritical", 1);
     }
-    // penalize peer based on their bitchiness level
+    // penalize peer based on their reported error level
     _context.profileManager().tunnelRejected(peer, rtt, howBad);
     _context.messageHistory().tunnelParticipantRejected(peer, "peer rejected after " + rtt + " with " + howBad + ": " + cfg.toString());
 }
@@ -749,7 +749,7 @@ public class TunnelPool {
 TunnelId inId = tunnel.getReceiveTunnelId(0);
 Hash gw = tunnel.getPeer(0);
 if ( (inId == null) || (gw == null) ) {
-    _log.error(toString() + ": wtf, tunnel has no inbound gateway/tunnelId? " + tunnel);
+    _log.error(toString() + ": broken? tunnel has no inbound gateway/tunnelId? " + tunnel);
     continue;
 }
 Lease lease = new Lease();