/** Tracks one subscriber and the destinations it has subscribed to. */
public Subscriber(MultiSource multi) {
    this.multi = multi;
    // thread-safe set; subscriptions may change concurrently
    this.subscriptions = new ConcurrentHashSet<Destination>();
}
/** Starts with an empty, thread-safe set of received IVs. */
public HashSetIVValidator() {
    this._received = new ConcurrentHashSet<ByteArray>();
}
/**
 * All entries in this bucket will have at least one bit different
 * from us in the range [begin, end] inclusive.
 *
 * @throws IllegalArgumentException if begin &gt; end
 */
public KBucketImpl(I2PAppContext context, int begin, int end, int max, KBucketTrimmer<T> trimmer) {
    // reject an inverted range up front
    if (begin > end)
        throw new IllegalArgumentException(begin + " > " + end);
    this._context = context;
    // sized with a little headroom above max
    this._entries = new ConcurrentHashSet<T>(max + 4);
    this._begin = begin;
    this._end = end;
    this._max = max;
    this._trimmer = trimmer;
}
/** Uses the global app context; option maps start small and grow as needed. */
public TunnelConfig() {
    this._context = I2PAppContext.getGlobalContext();
    this._booleanOptions = new ConcurrentHashSet<String>(4);
    this._otherOptions = new ConcurrentHashMap<String, String>(4);
}
/** Periodic event; peers pending expiration are buffered into a list for processing. */
public ExpirePeerEvent() {
    super(_context.simpleTimer2());
    this._expirePeers = new ConcurrentHashSet<PeerState>(128);
    this._expireBuffer = new ArrayList<PeerState>();
}
/**
 * @param context RouterContext in production, I2PAppContext for testing only
 */
public GeoIP(I2PAppContext context) {
    this._context = context;
    this._log = context.logManager().getLog(GeoIP.class);
    this._codeToName = new ConcurrentHashMap<String, String>(512);
    this._codeCache = new ConcurrentHashMap<String, String>(512);
    this._IPToCountry = new ConcurrentHashMap<Long, String>();
    this._pendingSearch = new ConcurrentHashSet<Long>();
    this._pendingIPv6Search = new ConcurrentHashSet<Long>();
    this._notFound = new ConcurrentHashSet<Long>();
    this._lock = new AtomicBoolean();
    // initial load of the country file
    readCountryFile();
}
/** Registry of outbound messages awaiting replies, with a cleanup task. */
public OutboundMessageRegistry(RouterContext context) {
    this._context = context;
    this._log = _context.logManager().getLog(OutboundMessageRegistry.class);
    this._selectors = new ArrayList<MessageSelector>(64);
    this._selectorToMessage = new HashMap<MessageSelector, Object>(64);
    this._activeMessages = new ConcurrentHashSet<OutNetMessage>(64);
    this._cleanupTask = new CleanupTask();
}
/**
 * For signing and verification.
 *
 * If the context property netdb.family.name is set, this can be used for signing,
 * else only for verification.
 *
 * @throws GeneralSecurityException if the configured family name is empty,
 *         contains a path separator or "..", or is an absolute path
 */
public FamilyKeyCrypto(RouterContext context) throws GeneralSecurityException {
    _context = context;
    _log = _context.logManager().getLog(FamilyKeyCrypto.class);
    _fname = _context.getProperty(PROP_FAMILY_NAME);
    if (_fname != null) {
        // Reject names unusable as a path component (traversal / absolute paths).
        // isEmpty() replaces the old length() <= 0 (length is never negative)
        // and is checked first as the cheapest test.
        if (_fname.isEmpty() ||
            _fname.contains("/") || _fname.contains("\\") ||
            _fname.contains("..") ||
            (new File(_fname)).isAbsolute())
            throw new GeneralSecurityException("Illegal family name: " + _fname);
    }
    // private key only available when a family name is configured
    _privkey = (_fname != null) ? initialize() : null;
    _pubkey = (_privkey != null) ? _privkey.toPublic() : null;
    _verified = new ConcurrentHashMap<Hash, String>(4);
    _negativeCache = new ConcurrentHashSet<Hash>(4);
    // immutable empty set when we cannot sign
    _ourFamily = (_privkey != null) ? new ConcurrentHashSet<Hash>(4) : Collections.<Hash>emptySet();
}
/**
 * Create a new runner against the given socket
 */
public ClientConnectionRunner(RouterContext context, ClientManager manager, Socket socket) {
    this._context = context;
    this._log = _context.logManager().getLog(ClientConnectionRunner.class);
    this._manager = manager;
    this._socket = socket;
    // unused for fastReceive
    this._messages = new ConcurrentHashMap<MessageId, Payload>();
    this._sessions = new ConcurrentHashMap<Hash, SessionParams>(4);
    this._alreadyProcessed = new ArrayList<MessageId>();
    this._acceptedPending = new ConcurrentHashSet<MessageId>();
    // message ids start at a random value
    this._messageId = new AtomicInteger(_context.random().nextInt());
}
/**
 * @param name just for logging / debugging / stats
 * @throws IllegalArgumentException if entryBytes is not in 1..32
 */
public DecayingHashSet(I2PAppContext context, int durationMs, int entryBytes, String name) {
    super(durationMs, entryBytes, name, context);
    if (entryBytes <= 0 || entryBytes > 32)
        throw new IllegalArgumentException("Bad size");
    this._current = new ConcurrentHashSet<ArrayWrapper>(128);
    this._previous = new ConcurrentHashSet<ArrayWrapper>(128);
    if (_log.shouldLog(Log.DEBUG))
        _log.debug("New DHS " + name + " entryBytes = " + entryBytes + " cycle (s) = " + (durationMs / 1000));
    // try to get a handle on memory usage vs. false positives
    context.statManager().createRateStat("router.decayingHashSet." + name + ".size",
                                         "Size", "Router",
                                         new long[] { 10 * Math.max(60*1000, durationMs) });
    context.statManager().createRateStat("router.decayingHashSet." + name + ".dups",
                                         "1000000 * Duplicates/Size", "Router",
                                         new long[] { 10 * Math.max(60*1000, durationMs) });
}
/**
 * Profiles are now loaded in a separate thread,
 * so this should return quickly.
 */
public PeerManager(RouterContext context) {
    this._context = context;
    this._log = context.logManager().getLog(PeerManager.class);
    this._persistenceHelper = new ProfilePersistenceHelper(context);
    this._organizer = context.profileOrganizer();
    this._organizer.setUs(context.routerHash());
    this._capabilitiesByPeer = new ConcurrentHashMap<Hash, String>(256);
    this._peersByCapability = new HashMap<Character, Set<Hash>>(TRACKED_CAPS.length());
    // one thread-safe bucket per tracked capability, keyed lower-case
    for (char cap : TRACKED_CAPS.toCharArray())
        this._peersByCapability.put(Character.valueOf(Character.toLowerCase(cap)), new ConcurrentHashSet<Hash>());
    loadProfilesInBackground();
}
/**
 * Base constructor in/out
 * @since 0.9.36
 */
private NTCPConnection(RouterContext ctx, NTCPTransport transport, RouterAddress remAddr, boolean isIn) {
    this._context = ctx;
    this._log = ctx.logManager().getLog(getClass());
    this._created = ctx.clock().now();
    this._transport = transport;
    this._remAddr = remAddr;
    // all "last activity" timestamps start at creation time
    this._lastSendTime = _created;
    this._lastReceiveTime = _created;
    this._lastRateUpdated = _created;
    this._readBufs = new ConcurrentLinkedQueue<ByteBuffer>();
    this._writeBufs = new ConcurrentLinkedQueue<ByteBuffer>();
    this._bwInRequests = new ConcurrentHashSet<Request>(2);
    this._bwOutRequests = new ConcurrentHashSet<Request>(8);
    this._outbound = new PriBlockingQueue<OutNetMessage>(ctx, "NTCP-Connection", 32);
    this._currentOutbound = new ArrayList<OutNetMessage>(1);
    this._isInbound = isIn;
    this._inboundListener = new InboundListener();
    this._outboundListener = new OutboundListener();
}
/**
 * For webapp.
 * @param ctxPath generally "/i2psnark"
 * @param ctxName generally "i2psnark"
 * @since 0.9.6
 */
public SnarkManager(I2PAppContext ctx, String ctxPath, String ctxName) {
    // thread-safe collections: torrents and magnets may be touched from multiple threads
    _snarks = new ConcurrentHashMap<String, Snark>();
    _magnets = new ConcurrentHashSet<String>();
    _addSnarkLock = new Object();
    _context = ctx;
    _contextPath = ctxPath;
    _contextName = ctxName;
    _log = _context.logManager().getLog(SnarkManager.class);
    _messages = new UIMessages(MAX_MESSAGES);
    _util = new I2PSnarkUtil(_context, ctxName);
    // Locate the config file; a relative name resolves against the context config dir.
    String cfile = ctxName + CONFIG_FILE_SUFFIX;
    File configFile = new File(cfile);
    if (!configFile.isAbsolute()) configFile = new File(_context.getConfigDir(), cfile);
    // migrateConfig() determines (and may move) the config dir — must run before
    // _configFile is derived from it
    _configDir = migrateConfig(configFile);
    _configFile = new File(_configDir, CONFIG_FILE);
    _trackerMap = new ConcurrentHashMap<String, Tracker>(4);
    loadConfig(null);
    // Outside the router there is no router shutdown sequence, so register our own
    // JVM shutdown hook to clean the temp dir
    if (!ctx.isRouterContext()) Runtime.getRuntime().addShutdownHook(new Thread(new TempDeleter(_util.getTempDir()), "Snark Temp Dir Deleter"));
}
/**
 * Sets defaults, reads the log configuration, then starts (or defers) the writer.
 * NOTE(review): _logBufferSize and _consoleBufferSize appear to be populated by
 * setConfig(), so setConfig() must run before _records/_consoleBuffer are created —
 * keep this statement order. TODO confirm against the field declarations.
 */
public LogManager(I2PAppContext context) {
    _displayOnScreen = true;
    _alreadyNoticedMissingConfig = false;
    _limits = new ConcurrentHashSet<LogLimit>();
    _logs = new ConcurrentHashMap<Object, Log>(128);
    _defaultLimit = Log.ERROR;
    _context = context;
    _log = getLog(LogManager.class);
    String location = context.getProperty(CONFIG_LOCATION_PROP, CONFIG_LOCATION_DEFAULT);
    setConfig(location);
    _records = new LinkedBlockingQueue<LogRecord>(_logBufferSize);
    _consoleBuffer = new LogConsoleBuffer(_consoleBufferSize);
    // If we aren't in the router context, delay creating the LogWriter until required,
    // so it doesn't create a log directory and log files unless there is output.
    // In the router context, we have to rotate to a new log file at startup or the logs.jsp
    // page will display the old log.
    if (context.isRouterContext()) {
        startLogWriter();
    } else {
        // Only in App Context.
        // In Router Context, the router has its own shutdown hook,
        // and will call our shutdown() from Router.finalShutdown()
        try {
            Runtime.getRuntime().addShutdownHook(new ShutdownHook());
        } catch (IllegalStateException ise) {
            // shutdown in progress
        }
    }
}
/** Standalone UDP endpoint test harness. */
public UDPEndpointTestStandalone(RouterContext ctx) {
    this._context = ctx;
    this._log = ctx.logManager().getLog(UDPEndpointTestStandalone.class);
    // packets we sent but have not yet seen come back
    this._sentNotReceived = new ConcurrentHashSet<ByteArray>(128);
}
/** Manages SSU introducers; registers relay / hole-punch stats. */
public IntroductionManager(RouterContext ctx, UDPTransport transport) {
    this._context = ctx;
    this._log = ctx.logManager().getLog(IntroductionManager.class);
    this._transport = transport;
    this._builder = new PacketBuilder(ctx, transport);
    this._outbound = new ConcurrentHashMap<Long, PeerState>(MAX_OUTBOUND);
    this._inbound = new ConcurrentHashSet<PeerState>(MAX_INBOUND);
    this._recentHolePunches = new HashSet<InetAddress>(16);
    ctx.statManager().createRateStat("udp.receiveRelayIntro", "How often we get a relayed request for us to talk to someone?", "udp", UDPTransport.RATES);
    ctx.statManager().createRateStat("udp.receiveRelayRequest", "How often we receive a good request to relay to someone else?", "udp", UDPTransport.RATES);
    ctx.statManager().createRateStat("udp.receiveRelayRequestBadTag", "Received relay requests with bad/expired tag", "udp", UDPTransport.RATES);
    ctx.statManager().createRateStat("udp.relayBadIP", "Received IP or port was bad", "udp", UDPTransport.RATES);
}
/**
 * @param metainfo null if in magnet mode
 * @param storage null if in magnet mode
 */
public PeerCoordinator(I2PSnarkUtil util, byte[] id, byte[] infohash, MetaInfo metainfo,
                       Storage storage, CoordinatorListener listener, Snark torrent) {
    _util = util;
    _random = util.getContext().random();
    _log = util.getContext().logManager().getLog(PeerCoordinator.class);
    this.id = id;
    this.infohash = infohash;
    this.metainfo = metainfo;
    this.storage = storage;
    this.listener = listener;
    this.snark = torrent;
    // setWantedPieces() reads the fields assigned above — keep this ordering
    this.wantedPieces = new ArrayList<Piece>();
    setWantedPieces();
    this.partialPieces = new ArrayList<PartialPiece>(getMaxConnections() + 1);
    this.peers = new LinkedBlockingDeque<Peer>();
    this.magnetState = new MagnetState(infohash, metainfo);
    this.pexPeers = new ConcurrentHashSet<PeerID>();
    // Install a timer to check the uploaders.
    // Randomize the first start time so multiple tasks are spread out,
    // this will help the behavior with global limits
    this.timer = new CheckEvent(_util.getContext(), new PeerCheckerTask(_util, this));
    timer.schedule((CHECK_PERIOD / 2) + _random.nextInt((int) CHECK_PERIOD));
}
/**
 * Initialize the new transport
 *
 */
public TransportImpl(RouterContext context) {
    _context = context;
    _log = _context.logManager().getLog(getClass());
    // message lifetime / size / timing stats, shared by all transports
    _context.statManager().createRateStat("transport.sendMessageFailureLifetime", "How long the lifetime of messages that fail are?", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRequiredRateStat("transport.sendMessageSize", "Size of sent messages (bytes)", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRequiredRateStat("transport.receiveMessageSize", "Size of received messages (bytes)", "Transport", new long[] { 60*1000l, 5*60*1000l, 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("transport.receiveMessageTime", "How long it takes to read a message?", "Transport", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("transport.receiveMessageTimeSlow", "How long it takes to read a message (when it takes more than a second)?", "Transport", new long[] { 60*1000l, 5*60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRequiredRateStat("transport.sendProcessingTime", "Time to process and send a message (ms)", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l });
    //_context.statManager().createRateStat("transport.sendProcessingTime." + getStyle(), "Time to process and send a message (ms)", "Transport", new long[] { 60*1000l });
    _context.statManager().createRateStat("transport.expiredOnQueueLifetime", "How long a message that expires on our outbound queue is processed", "Transport", new long[] { 60*1000l, 10*60*1000l, 60*60*1000l, 24*60*60*1000l } );
    _currentAddresses = new CopyOnWriteArrayList<RouterAddress>();
    // only the NTCP transport uses a bounded send pool here
    if (getStyle().equals("NTCP"))
        _sendPool = new ArrayBlockingQueue<OutNetMessage>(8);
    else
        _sendPool = null;
    _unreachableEntries = new HashMap<Hash, Long>(32);
    _wasUnreachableEntries = new HashMap<Hash, Long>(32);
    _localAddresses = new ConcurrentHashSet<InetAddress>(4);
    // periodically purge stale unreachable entries
    _context.simpleTimer2().addPeriodicEvent(new CleanupUnreachable(), 2 * UNREACHABLE_PERIOD, UNREACHABLE_PERIOD / 2);
}
/**
 * This is what I2PSocketManagerFactory.createManager() returns.
 * Direct instantiation by others is deprecated.
 *
 * @param context non-null
 * @param session non-null
 * @param opts may be null
 * @param name non-null
 */
public I2PSocketManagerFull(I2PAppContext context, I2PSession session, Properties opts, String name) {
    this._context = context;
    this._session = session;
    this._subsessions = new ConcurrentHashSet<I2PSession>(4);
    this._log = _context.logManager().getLog(I2PSocketManagerFull.class);
    // unique display name: caller-supplied name plus a per-JVM counter
    this._name = name + " " + (__managerId.incrementAndGet());
    this._acceptTimeout = ACCEPT_TIMEOUT_DEFAULT;
    this._defaultOptions = new ConnectionOptions(opts);
    this._connectionManager = new ConnectionManager(_context, _session, _defaultOptions);
    this._serverSocket = new I2PServerSocketFull(this);
    if (_log.shouldLog(Log.INFO)) {
        _log.info("Socket manager created. \ndefault options: " + _defaultOptions
                  + "\noriginal properties: " + opts);
    }
    debugInit(context);
}
/**
 * Floodfill variant of the netDb facade: tracks active flood queries and
 * in-progress verifies, registers search/flood stats, and creates (but does
 * not start) the floodfill monitor job.
 */
public FloodfillNetworkDatabaseFacade(RouterContext context) {
    super(context);
    _activeFloodQueries = new HashMap<Hash, FloodSearchJob>();
    _verifiesInProgress = new ConcurrentHashSet<Hash>(8);
    // search outcome stats
    _context.statManager().createRequiredRateStat("netDb.successTime", "Time for successful lookup (ms)", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("netDb.failedTime", "How long a failed search takes", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("netDb.failedRetries", "How many additional queries for an iterative search", "NetworkDatabase", new long[] { 60*60*1000l });
    _context.statManager().createRateStat("netDb.successRetries", "How many additional queries for an iterative search", "NetworkDatabase", new long[] { 60*60*1000l });
    _context.statManager().createRateStat("netDb.failedAttemptedPeers", "How many peers we sent a search to when the search fails", "NetworkDatabase", new long[] { 10*60*1000l });
    _context.statManager().createRateStat("netDb.successPeers", "How many peers are contacted in a successful search", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("netDb.failedPeers", "How many peers fail to respond to a lookup?", "NetworkDatabase", new long[] { 60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("netDb.searchCount", "Overall number of searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("netDb.searchMessageCount", "Overall number of mesages for all searches sent", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
    // reply validation stats
    _context.statManager().createRateStat("netDb.searchReplyValidated", "How many search replies we get that we are able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("netDb.searchReplyNotValidated", "How many search replies we get that we are NOT able to validate (fetch)", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("netDb.searchReplyValidationSkipped", "How many search replies we get from unreliable peers that we skip?", "NetworkDatabase", new long[] { 5*60*1000l, 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
    _context.statManager().createRateStat("netDb.republishQuantity", "How many peers do we need to send a found leaseSet to?", "NetworkDatabase", new long[] { 10*60*1000l, 60*60*1000l, 3*60*60*1000l, 24*60*60*1000l });
    // for ISJ
    _context.statManager().createRateStat("netDb.RILookupDirect", "Was an iterative RI lookup sent directly?", "NetworkDatabase", new long[] { 60*60*1000 });
    _ffMonitor = new FloodfillMonitorJob(_context, this);
}