2006-05-13 Complication

* Separate growth factors for tunnel count and tunnel test time
* Reduce growth factors, so the probabilistic throttle would activate
* Square probAccept values to decelerate more strongly when far from the average
  (see the sketch after this list)
* Create a bandwidth stat with an approximately 15-second half-life
* Make allowTunnel() check the 1-second bandwidth for overload
  before doing allowance calculations using the 15-second bandwidth
* Tweak the overload detector in BuildExecutor to be more sensitive
  to rising edges, and add the ability to initiate tunnel drops
* Add a function to seek and drop the highest-rate participating tunnel,
  keeping a fixed+random grace period between such drops.
  It doesn't seem very effective, so it is disabled by default
  ("router.dropTunnelsOnOverload=true" to enable)
Author: complication
Date: 2006-05-14 04:52:44 +00:00
Committed by: zzz
Parent: 2ad5a6f907
Commit: 5f17557e54
11 changed files with 241 additions and 63 deletions

View File

@@ -1,4 +1,18 @@
$Id: history.txt,v 1.472 2006/05/09 16:17:17 jrandom Exp $
$Id: history.txt,v 1.473 2006/05/11 22:31:44 jrandom Exp $
2006-05-13 Complication
* Separate growth factors for tunnel count and tunnel test time
* Reduce growth factors, so probabalistic throttle would activate
* Square probAccept values to decelerate stronger when far from average
* Create a bandwidth stat with approximately 15-second half life
* Make allowTunnel() check the 1-second bandwidth for overload
before doing allowance calculations using 15-second bandwidth
* Tweak the overload detector in BuildExecutor to be more sensitive
for rising edges, add ability to initiate tunnel drops
* Add a function to seek and drop the highest-rate participating tunnel,
keeping a fixed+random grace period between such drops.
It doesn't seem very effective, so disabled by default
("router.dropTunnelsOnOverload=true" to enable)
2006-05-11 jrandom
* PRNG bugfix (thanks cervantes and Complication!)

View File

@@ -1072,6 +1072,22 @@ public class Router {
}
return 0;
}
public int get15sRate() { return get15sRate(false); }
public int get15sRate(boolean outboundOnly) {
RouterContext ctx = _context;
if (ctx != null) {
FIFOBandwidthLimiter bw = ctx.bandwidthLimiter();
if (bw != null) {
int out = (int)bw.getSendBps15s();
if (outboundOnly)
return out;
return (int)Math.max(out, bw.getReceiveBps15s());
}
}
return 0;
}
public int get1mRate() { return get1mRate(false); }
public int get1mRate(boolean outboundOnly) {
int send = 0;

View File

@@ -103,7 +103,7 @@ class RouterThrottleImpl implements RouterThrottle {
int numTunnels = _context.tunnelManager().getParticipatingCount();
if (numTunnels > getMinThrottleTunnels()) {
double growthFactor = getTunnelGrowthFactor();
double tunnelGrowthFactor = getTunnelGrowthFactor();
Rate avgTunnels = _context.statManager().getRate("tunnel.participatingTunnels").getRate(60*60*1000);
if (avgTunnels != null) {
double avg = 0;
@@ -114,9 +114,10 @@ class RouterThrottleImpl implements RouterThrottle {
int min = getMinThrottleTunnels();
if (avg < min)
avg = min;
if ( (avg > 0) && (avg*growthFactor < numTunnels) ) {
if ( (avg > 0) && (avg*tunnelGrowthFactor < numTunnels) ) {
// we're accelerating, lets try not to take on too much too fast
double probAccept = (avg*growthFactor) / numTunnels;
double probAccept = (avg*tunnelGrowthFactor) / numTunnels;
probAccept = probAccept * probAccept; // square the decelerator for tunnel counts
int v = _context.random().nextInt(100);
if (v < probAccept*100) {
// ok
@@ -132,15 +133,17 @@ class RouterThrottleImpl implements RouterThrottle {
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Accepting tunnel request, since the average is " + avg
_log.info("Accepting tunnel request, since the tunnel count average is " + avg
+ " and we only have " + numTunnels + ")");
}
}
}
Rate tunnelTestTime10m = _context.statManager().getRate("tunnel.testSuccessTime").getRate(10*60*1000);
double tunnelTestTimeGrowthFactor = getTunnelTestTimeGrowthFactor();
Rate tunnelTestTime1m = _context.statManager().getRate("tunnel.testSuccessTime").getRate(1*60*1000);
Rate tunnelTestTime60m = _context.statManager().getRate("tunnel.testSuccessTime").getRate(60*60*1000);
if ( (tunnelTestTime10m != null) && (tunnelTestTime60m != null) && (tunnelTestTime10m.getLastEventCount() > 0) ) {
double avg10m = tunnelTestTime10m.getAverageValue();
if ( (tunnelTestTime1m != null) && (tunnelTestTime60m != null) && (tunnelTestTime1m.getLastEventCount() > 0) ) {
double avg1m = tunnelTestTime1m.getAverageValue();
double avg60m = 0;
if (tunnelTestTime60m.getLastEventCount() > 0)
avg60m = tunnelTestTime60m.getAverageValue();
@@ -150,23 +153,27 @@ class RouterThrottleImpl implements RouterThrottle {
if (avg60m < 2000)
avg60m = 2000; // minimum before complaining
if ( (avg60m > 0) && (avg10m > avg60m * growthFactor) ) {
double probAccept = (avg60m*growthFactor)/avg10m;
if ( (avg60m > 0) && (avg1m > avg60m * tunnelTestTimeGrowthFactor) ) {
double probAccept = (avg60m*tunnelTestTimeGrowthFactor)/avg1m;
probAccept = probAccept * probAccept; // square the decelerator for test times
int v = _context.random().nextInt(100);
if (v < probAccept*100) {
// ok
if (_log.shouldLog(Log.INFO))
_log.info("Probabalistically accept tunnel request (p=" + probAccept
+ " v=" + v + " test time avg 10m=" + avg10m + " 60m=" + avg60m + ")");
+ " v=" + v + " test time avg 1m=" + avg1m + " 60m=" + avg60m + ")");
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Probabalistically refusing tunnel request (test time avg 10m=" + avg10m
_log.warn("Probabalistically refusing tunnel request (test time avg 1m=" + avg1m
+ " 60m=" + avg60m + ")");
_context.statManager().addRateData("router.throttleTunnelProbTestSlow", (long)(avg10m-avg60m), 0);
_context.statManager().addRateData("router.throttleTunnelProbTestSlow", (long)(avg1m-avg60m), 0);
return TunnelHistory.TUNNEL_REJECT_PROBABALISTIC_REJECT;
}
} else {
if (_log.shouldLog(Log.INFO))
_log.info("Accepting tunnel request, since 60m test time average is " + avg60m
+ " and past 1m only has " + avg1m + ")");
}
}
}
String maxTunnels = _context.getProperty(PROP_MAX_TUNNELS);
@@ -201,8 +208,8 @@ class RouterThrottleImpl implements RouterThrottle {
}
if (messagesPerTunnel < DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE)
messagesPerTunnel = DEFAULT_MESSAGES_PER_TUNNEL_ESTIMATE;
int participatingTunnels = _context.tunnelManager().getParticipatingCount();
double bytesAllocated = messagesPerTunnel * participatingTunnels * 1024;
double bytesAllocated = messagesPerTunnel * numTunnels * net.i2p.router.tunnel.TrivialPreprocessor.PREPROCESSED_SIZE;
if (!allowTunnel(bytesAllocated, numTunnels)) {
_context.statManager().addRateData("router.throttleTunnelBandwidthExceeded", (long)bytesAllocated, 0);
@@ -228,16 +235,23 @@ class RouterThrottleImpl implements RouterThrottle {
*/
private boolean allowTunnel(double bytesAllocated, int numTunnels) {
int maxKBps = Math.min(_context.bandwidthLimiter().getOutboundKBytesPerSecond(), _context.bandwidthLimiter().getInboundKBytesPerSecond());
int used1s = 0; //get1sRate(_context); // dont throttle on the 1s rate, its too volatile
int used1m = _context.router().get1mRate();
int used5m = 0; //get5mRate(_context); // don't throttle on the 5m rate, as that'd hide available bandwidth
int used = Math.max(Math.max(used1s, used1m), used5m);
int used1s = _context.router().get1sRate(); // dont throttle on the 1s rate, its too volatile
int used15s = _context.router().get15sRate();
int used1m = _context.router().get1mRate(); // dont throttle on the 1m rate, its too slow
int used = used15s;
double share = _context.router().getSharePercentage();
int availBps = (int)(((maxKBps*1024)*share) - used); //(int)(((maxKBps*1024) - used) * getSharePercentage());
// Write stats before making decisions
_context.statManager().addRateData("router.throttleTunnelBytesUsed", used, maxKBps);
_context.statManager().addRateData("router.throttleTunnelBytesAllowed", availBps, (long)bytesAllocated);
if (used1s > (maxKBps*1024)) {
if (_log.shouldLog(Log.WARN)) _log.warn("Reject tunnel, 1s rate (" + used1s + ") indicates overload.");
return false;
}
if (true) {
// ok, ignore any predictions of 'bytesAllocated', since that makes poorly
// grounded conclusions about future use (or even the bursty use). Instead,
@@ -310,9 +324,17 @@ class RouterThrottleImpl implements RouterThrottle {
private double getTunnelGrowthFactor() {
try {
return Double.parseDouble(_context.getProperty("router.tunnelGrowthFactor", "3.0"));
return Double.parseDouble(_context.getProperty("router.tunnelGrowthFactor", "1.3"));
} catch (NumberFormatException nfe) {
return 3.0;
return 1.3;
}
}
private double getTunnelTestTimeGrowthFactor() {
try {
return Double.parseDouble(_context.getProperty("router.tunnelTestTimeGrowthFactor", "1.3"));
} catch (NumberFormatException nfe) {
return 1.3;
}
}
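
A compact, hedged restatement of the bandwidth gate that the diff above adds to allowTunnel(): reject outright when the volatile 1-second rate already exceeds the configured limit, otherwise base the allowance on the smoother 15-second rate and the share percentage. The inputs and the final acceptance rule are simplified here; the real method also records stats and applies further heuristics to bytesAllocated.

public class BandwidthGateSketch {
    /**
     * @param maxKBps configured limit (min of inbound and outbound), KBytes/s
     * @param used1s  1-second bandwidth estimate, bytes/s
     * @param used15s 15-second bandwidth estimate, bytes/s
     * @param share   fraction of bandwidth shared with participating tunnels, 0..1
     */
    public static boolean allowTunnel(int maxKBps, int used1s, int used15s, double share) {
        int maxBps = maxKBps * 1024;
        if (used1s > maxBps)
            return false;                              // a 1s spike already signals overload
        int availBps = (int)((maxBps * share) - used15s);
        return availBps > 0;                           // simplified acceptance rule for this sketch
    }
}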

View File

@@ -49,6 +49,8 @@ public class FIFOBandwidthLimiter {
private long _lastStatsUpdated;
private float _sendBps;
private float _recvBps;
private float _sendBps15s;
private float _recvBps15s;
private static int __id = 0;
@@ -66,6 +68,8 @@ public class FIFOBandwidthLimiter {
_context.statManager().createRateStat("bwLimiter.inboundDelayedTime", "How long it takes to honor an inbound request (ignoring ones with that go instantly)?", "BandwidthLimiter", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l });
_context.statManager().createRateStat("bw.sendBps1s", "How fast we are transmitting for the 1s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("bw.recvBps1s", "How fast we are receiving for the 1s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("bw.sendBps15s", "How fast we are transmitting for the 15s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_context.statManager().createRateStat("bw.recvBps15s", "How fast we are receiving for the 15s quantization (period is the number of bytes transmitted)?", "Bandwidth", new long[] { 60*1000l, 10*60*1000l });
_pendingInboundRequests = new ArrayList(16);
_pendingOutboundRequests = new ArrayList(16);
_lastTotalSent = _totalAllocatedOutboundBytes;
@@ -97,6 +101,8 @@ public class FIFOBandwidthLimiter {
public void setOutboundUnlimited(boolean isUnlimited) { _outboundUnlimited = isUnlimited; }
public float getSendBps() { return _sendBps; }
public float getReceiveBps() { return _recvBps; }
public float getSendBps15s() { return _sendBps15s; }
public float getReceiveBps15s() { return _recvBps15s; }
public int getOutboundKBytesPerSecond() { return _refiller.getOutboundKBytesPerSecond(); }
public int getInboundKBytesPerSecond() { return _refiller.getInboundKBytesPerSecond(); }
@@ -270,14 +276,16 @@ public class FIFOBandwidthLimiter {
private void updateStats() {
long now = now();
long time = now - _lastStatsUpdated;
// If at least one second has passed
if (time >= 1000) {
long totS = _totalAllocatedOutboundBytes;
long totR = _totalAllocatedInboundBytes;
long sent = totS - _lastTotalSent;
long recv = totR - _lastTotalReceived;
long sent = totS - _lastTotalSent; // How much we sent meanwhile
long recv = totR - _lastTotalReceived; // How much we received meanwhile
_lastTotalSent = totS;
_lastTotalReceived = totR;
_lastStatsUpdated = now;
if (_sendBps <= 0)
_sendBps = ((float)sent*1000f)/(float)time;
else
@@ -286,12 +294,33 @@ public class FIFOBandwidthLimiter {
_recvBps = ((float)recv*1000f)/(float)time;
else
_recvBps = (0.9f)*_recvBps + (0.1f)*((float)recv*1000)/(float)time;
if (_log.shouldLog(Log.WARN)) {
if (_log.shouldLog(Log.INFO))
_log.info("BW: time = " + time + " sent: " + sent + " recv: " + recv);
_context.statManager().getStatLog().addData("bw", "bw.sendBps1s", (long)_sendBps, sent);
_context.statManager().getStatLog().addData("bw", "bw.recvBps1s", (long)_recvBps, recv);
}
// Maintain an approximate average with a 15-second halflife
// Weights (0.955 and 0.045) are tuned so that transition between two values (e.g. 0..10)
// would reach their midpoint (e.g. 5) in 15s
if (_sendBps15s <= 0)
_sendBps15s = ((float)sent*15*1000f)/(float)time;
else
_sendBps15s = (0.955f)*_sendBps15s + (0.045f)*((float)sent*1000f)/(float)time;
if (_recvBps15s <= 0)
_recvBps15s = ((float)recv*15*1000f)/(float)time;
else
_recvBps15s = (0.955f)*_recvBps15s + (0.045f)*((float)recv*1000)/(float)time;
if (_log.shouldLog(Log.WARN)) {
if (_log.shouldLog(Log.INFO))
_log.info("BW15: time = " + time + " sent: " + sent + " recv: " + recv);
_context.statManager().getStatLog().addData("bw", "bw.sendBps15s", (long)_sendBps15s, sent);
_context.statManager().getStatLog().addData("bw", "bw.recvBps15s", (long)_recvBps15s, recv);
}
}
}
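
A small verification sketch (not part of the commit) for the weight comment above: with one stats update per second, keeping a fraction of 0.5^(1/15) of the old average (about 0.955) and blending in the rest (about 0.045) gives a step response that reaches its midpoint after roughly 15 seconds.

public class HalfLifeSketch {
    public static void main(String[] args) {
        double halfLifeSec = 15.0;
        double updateIntervalSec = 1.0;
        double keep = Math.pow(0.5, updateIntervalSec / halfLifeSec); // ~0.955
        double blend = 1.0 - keep;                                    // ~0.045
        System.out.println("keep=" + keep + " blend=" + blend);

        // Step response: start at 0, feed a constant rate of 10 units/s.
        double avg = 0;
        for (int sec = 1; sec <= 15; sec++)
            avg = keep * avg + blend * 10.0;
        System.out.println("after 15s: " + avg); // ~5.0, i.e. the midpoint
    }
}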

View File

@@ -23,6 +23,7 @@ public class HopConfig {
private SessionKey _ivKey;
private SessionKey _replyKey;
private ByteArray _replyIV;
private long _creation;
private long _expiration;
private Map _options;
private long _messagesProcessed;
@@ -37,8 +38,10 @@ public class HopConfig {
_sendTo = null;
_layerKey = null;
_ivKey = null;
_creation = -1;
_expiration = -1;
_options = null;
_messagesProcessed = 0;
}
/** what tunnel ID are we receiving on? */
@@ -95,6 +98,10 @@ public class HopConfig {
public long getExpiration() { return _expiration; }
public void setExpiration(long when) { _expiration = when; }
/** when was this tunnel created (in ms since the epoch)? */
public long getCreation() { return _creation; }
public void setCreation(long when) { _creation = when; }
/**
* what are the configuration options for this tunnel (if any). keys to
* this map should be strings and values should be Objects of an

View File

@@ -22,7 +22,7 @@ public class TrivialPreprocessor implements TunnelGateway.QueuePreprocessor {
protected I2PAppContext _context;
private Log _log;
static final int PREPROCESSED_SIZE = 1024;
public static final int PREPROCESSED_SIZE = 1024;
protected static final int IV_SIZE = HopProcessor.IV_LENGTH;
protected static final ByteCache _dataCache = ByteCache.getInstance(512, PREPROCESSED_SIZE);
protected static final ByteCache _ivCache = ByteCache.getInstance(128, IV_SIZE);

View File

@@ -38,6 +38,8 @@ public class TunnelDispatcher implements Service {
private long _lastParticipatingExpiration;
private BloomFilterIVValidator _validator;
private LeaveTunnel _leaveJob;
/** what is the date/time we last deliberately dropped a tunnel? **/
private long _lastDropTime;
/** Creates a new instance of TunnelDispatcher */
public TunnelDispatcher(RouterContext ctx) {
@@ -49,6 +51,7 @@ public class TunnelDispatcher implements Service {
_inboundGateways = new HashMap();
_participatingConfig = new HashMap();
_lastParticipatingExpiration = 0;
_lastDropTime = 0;
_validator = null;
_leaveJob = new LeaveTunnel(ctx);
ctx.statManager().createRateStat("tunnel.participatingTunnels",
@@ -528,6 +531,70 @@
}
}
private static final int DROP_BASE_INTERVAL = 40 * 1000;
private static final int DROP_RANDOM_BOOST = 10 * 1000;
/**
* If a router is too overloaded to build its own tunnels,
* the build executor may call this.
*/
public void dropBiggestParticipating() {
List partTunnels = listParticipatingTunnels();
if ((partTunnels == null) || (partTunnels.size() == 0)) {
if (_log.shouldLog(Log.ERROR))
_log.error("Not dropping tunnel, since partTunnels was null or had 0 items!");
return;
}
long periodWithoutDrop = _context.clock().now() - _lastDropTime;
if (periodWithoutDrop < DROP_BASE_INTERVAL) {
if (_log.shouldLog(Log.WARN))
_log.error("Not dropping tunnel, since last drop was " + periodWithoutDrop + " ms ago!");
return;
}
HopConfig biggest = null;
HopConfig current = null;
long biggestMessages = 0;
long biggestAge = -1;
double biggestRate = 0;
for (int i=0; i<partTunnels.size(); i++) {
current = (HopConfig)partTunnels.get(i);
long currentMessages = current.getProcessedMessagesCount();
long currentAge = (_context.clock().now() - current.getCreation());
double currentRate = ((double) currentMessages / (currentAge / 1000));
// Determine if this is the biggest, but don't include tunnels
// with less than 20 messages (unpredictable rates)
if ((currentMessages > 20) && ((biggest == null) || (currentRate > biggestRate))) {
// Update our profile of the biggest
biggest = current;
biggestMessages = biggest.getProcessedMessagesCount();
biggestAge = (_context.clock().now() - current.getCreation());
biggestRate = ((double) biggestMessages / (biggestAge / 1000));
}
}
if (biggest == null) {
if (_log.shouldLog(Log.ERROR))
_log.error("Not dropping tunnel, since no suitable tunnel was found.");
return;
}
if (_log.shouldLog(Log.ERROR))
_log.error("Dropping tunnel with " + biggestRate + " messages/s and " + biggestMessages +
" messages, last drop was " + (periodWithoutDrop / 1000) + " s ago.");
remove(biggest);
_lastDropTime = _context.clock().now() + _context.random().nextInt(DROP_RANDOM_BOOST);
}
public void startup() {
// NB: 256 == assume max rate (size adjusted to handle 256 messages per second)
_validator = new BloomFilterIVValidator(_context, 256);
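
The fixed+random grace period between deliberate drops can be isolated into a small standalone sketch (class and method names here are invented; the real checks are in dropBiggestParticipating() above). Pushing _lastDropTime into the future by a random boost means consecutive drops are spaced at least 40 and at most roughly 50 seconds apart.

import java.util.Random;

public class DropGraceSketch {
    private static final int DROP_BASE_INTERVAL = 40 * 1000; // fixed part of the grace period, ms
    private static final int DROP_RANDOM_BOOST = 10 * 1000;  // random extension, ms
    private long _lastDropTime = 0;
    private final Random _random = new Random();

    /** Returns true if a drop is allowed now, and records a jittered drop time. */
    public boolean tryDrop(long now) {
        if (now - _lastDropTime < DROP_BASE_INTERVAL)
            return false;                             // still inside the grace period
        _lastDropTime = now + _random.nextInt(DROP_RANDOM_BOOST);
        return true;
    }
}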

View File

@@ -46,9 +46,6 @@ class BuildExecutor implements Runnable {
_handler = new BuildHandler(ctx, this);
}
// Estimated cost of one tunnel build attempt, bytes
private static final int BUILD_BANDWIDTH_ESTIMATE_BYTES = 5*1024;
private int allowed() {
StringBuffer buf = null;
if (_log.shouldLog(Log.DEBUG)) {
@@ -118,52 +115,75 @@
return 0; // if we have a job heavily blocking our jobqueue, ssllloowww dddooowwwnnn
}
if (isOverloaded()) {
int used1s = _context.router().get1sRate(true);
// If 1-second average indicates we could manage building one tunnel
if ((maxKBps*1024) - used1s > BUILD_BANDWIDTH_ESTIMATE_BYTES) {
// Check if we're already building some tunnels
if (concurrent > 0) {
if (_log.shouldLog(Log.WARN))
_log.warn("Mild overload and favourable 1s rate (" + used1s + ") but already building, so allowed 0.");
return 0;
} else {
if (_log.shouldLog(Log.WARN))
_log.warn("Mild overload and favourable 1s rate(" + used1s + "), so allowed 1.");
return 1;
}
} else {
// Allow none
if (_log.shouldLog(Log.WARN))
_log.warn("We had serious overload, so allowed building 0.");
return 0;
}
}
// Trim the number of allowed tunnels for overload,
// initiate a tunnel drop on severe overload
allowed = trimForOverload(allowed,concurrent);
return allowed;
}
// Estimated cost of tunnel build attempt, bytes
private static final int BUILD_BANDWIDTH_ESTIMATE_BYTES = 5*1024;
/**
* Don't even try to build tunnels if we're saturated
*/
private boolean isOverloaded() {
//if (true) return false;
private int trimForOverload(int allowed, int concurrent) {
// dont include the inbound rates when throttling tunnel building, since
// that'd expose a pretty trivial attack.
int used1s = _context.router().get1sRate(true); // Avoid reliance on the 1s rate, too volatile
int used15s = _context.router().get15sRate(true);
int used1m = _context.router().get1mRate(true); // Avoid reliance on the 1m rate, too slow
int maxKBps = _context.bandwidthLimiter().getOutboundKBytesPerSecond();
int used1s = 0; // dont throttle on the 1s rate, its too volatile
int used1m = _context.router().get1mRate(true);
int used5m = 0; //get5mRate(_context); // don't throttle on the 5m rate, as that'd hide available bandwidth
int used = Math.max(Math.max(used1s, used1m), used5m);
if ((maxKBps * 1024) - used <= 0) {
int maxBps = maxKBps * 1024;
int overBuildLimit = maxBps - BUILD_BANDWIDTH_ESTIMATE_BYTES; // Beyond this, refrain from building
int nearBuildLimit = maxBps - (2*BUILD_BANDWIDTH_ESTIMATE_BYTES); // Beyond this, consider it close
// Detect any fresh overload which could set back tunnel building
if (Math.max(used1s,used15s) > overBuildLimit) {
if (_log.shouldLog(Log.WARN))
_log.warn("Too overloaded to build our own tunnels (used=" + used + ", maxKBps=" + maxKBps + ", 1s=" + used1s + ", 1m=" + used1m + ")");
return true;
} else {
return false;
_log.warn("Overloaded, trouble building tunnels (maxKBps=" + maxKBps +
", 1s=" + used1s + ", 15s=" + used15s + ", 1m=" + used1m + ")");
// Detect serious overload
if (((used1s > maxBps) && (used1s > used15s) && (used15s > nearBuildLimit)) ||
((used1s > maxBps) && (used15s > overBuildLimit)) ||
((used1s > overBuildLimit) && (used15s > overBuildLimit))) {
if (_log.shouldLog(Log.WARN))
_log.warn("Serious overload, allow building 0.");
// If so configured, drop biggest participating tunnel
if (Boolean.valueOf(_context.getProperty("router.dropTunnelsOnOverload","false")).booleanValue() == true) {
if (_log.shouldLog(Log.WARN))
_log.warn("Requesting drop of biggest participating tunnel.");
_context.tunnelDispatcher().dropBiggestParticipating();
}
return(0);
} else {
// Mild overload, check if we already build tunnels
if (concurrent <= 0) {
// We aren't building, allow 1
if (_log.shouldLog(Log.WARN))
_log.warn("Mild overload, allow building 1.");
return(1);
} else {
// Already building, allow 0
if (_log.shouldLog(Log.WARN))
_log.warn("Mild overload but already building " + concurrent + ", so allow 0.");
return(0);
}
}
}
// No overload, allow as requested
return(allowed);
}
public void run() {
_isRunning = true;
List wanted = new ArrayList(8);
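
To make the severity tiers in trimForOverload() above easier to follow, here is a compact standalone restatement. The constants mirror the diff; the bandwidth figures in main() are arbitrary examples, and the real method additionally logs and can request a tunnel drop when the serious-overload branch is taken.

public class OverloadTrimSketch {
    private static final int BUILD_BANDWIDTH_ESTIMATE_BYTES = 5 * 1024;

    /** Returns 0 on serious overload, 1 on mild overload while idle, else the requested allowance. */
    public static int trim(int allowed, int concurrent, int maxKBps, int used1s, int used15s) {
        int maxBps = maxKBps * 1024;
        int overBuildLimit = maxBps - BUILD_BANDWIDTH_ESTIMATE_BYTES;     // beyond this, refrain from building
        int nearBuildLimit = maxBps - 2 * BUILD_BANDWIDTH_ESTIMATE_BYTES; // beyond this, consider it close
        if (Math.max(used1s, used15s) <= overBuildLimit)
            return allowed;                                               // no fresh overload
        boolean serious = (used1s > maxBps && used1s > used15s && used15s > nearBuildLimit)
                       || (used1s > maxBps && used15s > overBuildLimit)
                       || (used1s > overBuildLimit && used15s > overBuildLimit);
        if (serious)
            return 0;                                                     // too loaded to build at all
        return (concurrent <= 0) ? 1 : 0;                                 // mild: one build, but only if idle
    }

    public static void main(String[] args) {
        // 32 KB/s limit: maxBps=32768, overBuildLimit=27648, nearBuildLimit=22528.
        System.out.println(trim(3, 0, 32, 30000, 25000)); // mild overload while idle -> 1
        System.out.println(trim(3, 0, 32, 35000, 28000)); // serious overload -> 0
    }
}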

View File

@@ -438,6 +438,7 @@ class BuildHandler {
if (response == 0) {
HopConfig cfg = new HopConfig();
cfg.setCreation(_context.clock().now());
cfg.setExpiration(_context.clock().now() + 10*60*1000);
cfg.setIVKey(req.readIVKey());
cfg.setLayerKey(req.readLayerKey());

View File

@@ -118,6 +118,7 @@ public class HandleTunnelCreateMessageJob extends JobImpl {
HopConfig cfg = new HopConfig();
long expiration = _request.getDurationSeconds()*1000 + getContext().clock().now();
cfg.setCreation(getContext().clock().now());
cfg.setExpiration(expiration);
cfg.setIVKey(_request.getIVKey());
cfg.setLayerKey(_request.getLayerKey());

View File

@@ -607,6 +607,7 @@ public class TunnelPool {
int j = peers.size() - 1 - i;
cfg.setPeer(j, (Hash)peers.get(i));
HopConfig hop = cfg.getConfig(j);
hop.setCreation(_context.clock().now());
hop.setExpiration(expiration);
hop.setIVKey(_context.keyGenerator().generateSessionKey());
hop.setLayerKey(_context.keyGenerator().generateSessionKey());