ParticipatingThrottler(RouterContext ctx) {
    this.context = ctx;
    this.counter = new ObjectCounter<Hash>();
    ctx.simpleTimer2().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
}
ConnThrottler(int max, int totalMax, long period, SimpleTimer2 timer) {
    _max = max;
    _totalMax = totalMax;
    this.counter = new ObjectCounter<Hash>();
    _currentTotal = new AtomicInteger();
    // shorten the initial period by a random amount
    // to prevent correlation across destinations
    // and identification of router startup time
    timer.addPeriodicEvent(new Cleaner(),
                           (period / 2) + RandomSource.getInstance().nextLong(period / 2),
                           period);
}
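// Illustration of the randomized first fire above (a sketch with example
// numbers, not taken from the source): the first Cleaner run is uniform
// over [period/2, period) after construction, and every later run repeats
// exactly `period` apart, so the cleanup schedule reveals nothing about
// when the router started.
long period = 10*60*1000L;   // example: a 10-minute cleanup period
long firstDelay = (period / 2)
                + RandomSource.getInstance().nextLong(period / 2);
// firstDelay falls uniformly between 5 and 10 minutes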
public IPThrottler(int max, long time) {
    _max = max;
    _counter = new ObjectCounter<Integer>();
    SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), time);
}
RequestThrottler(RouterContext ctx) {
    this.context = ctx;
    this.counter = new ObjectCounter<Hash>();
    ctx.simpleTimer2().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
}
LookupThrottler() {
    this.counter = new ObjectCounter<ReplyTunnel>();
    SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
}
FloodThrottler() {
    this.counter = new ObjectCounter<Hash>();
    SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), CLEAN_TIME);
}
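// All of the throttlers above share one pattern: count events per key in an
// ObjectCounter and reset all counts on a fixed schedule, rather than aging
// each entry individually. A minimal sketch of what such a Cleaner typically
// looks like (an assumption inferred from the constructors above, not copied
// from any one of these classes):
private class Cleaner implements SimpleTimer.TimedEvent {
    public void timeReached() {
        // drop all per-key counts; limits are enforced per clean interval
        counter.clear();
    }
}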
/**
 * For reliability reasons, don't allow a peer in more than x% of
 * client and exploratory tunnels.
 *
 * This also prevents a single huge-capacity (or malicious) peer from
 * taking all the tunnels in the network. (It would be nice to limit
 * the % of total network tunnels to 10% or so, but that appears to be
 * too low to set as a default here; much lower than 33% will push client
 * tunnels out of the fast tier into high cap or beyond.)
 *
 * Possible improvement: restrict based on count per IP, or IP block,
 * to slightly increase the cost of collusion.
 *
 * @return Set of peers that should not be allowed in another tunnel
 */
public Set<Hash> selectPeersInTooManyTunnels() {
    ObjectCounter<Hash> lc = new ObjectCounter<Hash>();
    int tunnelCount = countTunnelsPerPeer(lc);
    Set<Hash> rv = new HashSet<Hash>();
    if (tunnelCount >= 4 && _context.router().getUptime() > 10*60*1000) {
        int max = _context.getProperty("router.maxTunnelPercentage", DEFAULT_MAX_PCT_TUNNELS);
        for (Hash h : lc.objects()) {
            if (lc.count(h) > 0 &&
                (lc.count(h) + 1) * 100 / (tunnelCount + 1) > max)
                rv.add(h);
        }
    }
    return rv;
}
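// Worked example of the cutoff above (illustrative numbers, not from the
// source): with tunnelCount = 99 and max = 33, a peer with count c is
// excluded when (c + 1) * 100 / (99 + 1) > 33, i.e. once it already sits in
// 33 of the 99 tunnels. The +1 on both sides projects the peer's share as
// if one more tunnel were built through it before deciding.
int tunnelCount = 99;
int max = 33;   // router.maxTunnelPercentage
for (int c = 30; c <= 35; c++) {
    boolean excluded = (c + 1) * 100 / (tunnelCount + 1) > max;
    // c = 32 -> 33*100/100 = 33, not > 33, still allowed
    // c = 33 -> 34*100/100 = 34, > 33, excluded from further tunnels
}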
public NegativeLookupCache(RouterContext context) {
    this.counter = new ObjectCounter<Hash>();
    this.badDests = new LHMCache<Hash, Destination>(MAX_BAD_DESTS);
    this._maxFails = context.getProperty("netdb.negativeCache.maxFails", MAX_FAILS);
    final long cleanTime = context.getProperty("netdb.negativeCache.cleanupInterval", CLEAN_TIME);
    SimpleTimer2.getInstance().addPeriodicEvent(new Cleaner(), cleanTime);
}
public EventPumper(RouterContext ctx, NTCPTransport transport) {
    _context = ctx;
    _log = ctx.logManager().getLog(getClass());
    _transport = transport;
    _expireIdleWriteTime = MAX_EXPIRE_IDLE_TIME;
    _blockedIPs = new ObjectCounter<ByteArray>();
    _context.statManager().createRateStat("ntcp.pumperKeySetSize", "", "ntcp", new long[] {10*60*1000});
    //_context.statManager().createRateStat("ntcp.pumperKeysPerLoop", "", "ntcp", new long[] {10*60*1000});
    _context.statManager().createRateStat("ntcp.pumperLoopsPerSecond", "", "ntcp", new long[] {10*60*1000});
    _context.statManager().createRateStat("ntcp.zeroRead", "", "ntcp", new long[] {10*60*1000});
    _context.statManager().createRateStat("ntcp.zeroReadDrop", "", "ntcp", new long[] {10*60*1000});
    _context.statManager().createRateStat("ntcp.dropInboundNoMessage", "", "ntcp", new long[] {10*60*1000});
}