/**
 * @return Netty server port
 */
public int getPort() {
  final int nettyPort = _serverNettyConfig.getInt(NETTY_SERVER_PORT);
  return nettyPort;
}
}
/**
 * @return the maximum number of refresh threads allowed to run in parallel; defaults to 1
 *     (i.e. one refresh at a time) when the key is not configured
 */
public int getMaxParallelRefreshThreads() {
  final int maxRefreshThreads = _instanceDataManagerConfiguration.getInt(MAX_PARALLEL_REFRESH_THREADS, 1);
  return maxRefreshThreads;
}
/**
 * @return the maximum number of parallel segment builds; defaults to 0 when not configured
 *     (presumably 0 means "no limit" — confirm against the caller's handling of this value)
 */
public int getMaxParallelSegmentBuilds() {
  final int maxSegmentBuilds = _instanceDataManagerConfiguration.getInt(MAX_PARALLEL_SEGMENT_BUILDS, 0);
  return maxSegmentBuilds;
}
/**
 * Reads a percentage from the scheduler configuration, falling back to the default when the
 * configured value is outside the valid range.
 *
 * @param schedulerConfig configuration to read from
 * @param key configuration key holding the percentage
 * @param defaultValue value used when the key is absent or the configured value is invalid
 * @return a percentage in the range (0, 100]
 */
private int checkGetOrDefaultPct(Configuration schedulerConfig, String key, int defaultValue) {
  final int configuredPct = schedulerConfig.getInt(key, defaultValue);
  // Valid range is (0, 100]; anything else is logged and replaced with the default.
  if (configuredPct > 0 && configuredPct <= 100) {
    return configuredPct;
  }
  LOGGER.error("Incorrect value for {}, value: {}; using default: {}", key, configuredPct, defaultValue);
  return defaultValue;
}
/**
 * Builds an HTTP 413 (Payload Too Large) response telling the client the currently configured
 * maximum size and the setting that controls it.
 */
private Response valueTooBig() {
  final String settingName = ServerSettings.maximum_response_header_size.name();
  final int maxBytes = config.getInt(settingName);
  final String body = String.format(
      "The property value provided was too large. The maximum size is currently set to %d bytes. "
          + "You can configure this by setting the '%s' property.",
      maxBytes, settingName);
  return Response.status(413).entity(body).build();
}
/**
 * Initializes this instance via the superclass, then reads the number of routing tables to
 * build from the configuration (falling back to the default when the key is absent).
 */
@Override
public void init(Configuration configuration, TableConfig tableConfig,
    ZkHelixPropertyStore<ZNRecord> propertyStore, BrokerMetrics brokerMetrics) {
  super.init(configuration, tableConfig, propertyStore, brokerMetrics);
  final int numRoutingTables = configuration.getInt(NUM_ROUTING_TABLES_KEY, DEFAULT_NUM_ROUTING_TABLES);
  _numRoutingTables = numRoutingTables;
}
/**
 * Initializes the HDFS segment fetcher: reads the retry settings, loads the Hadoop
 * configuration from the configured path, authenticates, and opens the Hadoop file system.
 *
 * @param configs fetcher configuration (retry count, retry wait time, Hadoop conf path)
 * @throws RuntimeException if initialization fails. Previously the failure was only logged,
 *     which left {@code _hadoopFS} null and deferred the error to an NPE on first use; failing
 *     fast here is also consistent with HadoopPinotFS.init, which rethrows on failure.
 */
@Override
public void init(org.apache.commons.configuration.Configuration configs) {
  try {
    _retryCount = configs.getInt(RETRY, _retryCount);
    _retryWaitMs = configs.getInt(RETRY_WAITIME_MS, _retryWaitMs);
    Configuration hadoopConf = getConf(configs.getString(HADOOP_CONF_PATH));
    authenticate(hadoopConf, configs);
    _hadoopFS = FileSystem.get(hadoopConf);
    LOGGER.info("successfully initialized hdfs segment fetcher");
  } catch (Exception e) {
    LOGGER.error("failed to initialized the hdfs segment fetcher", e);
    // Fail fast with the cause preserved instead of swallowing the exception.
    throw new RuntimeException("Failed to initialize the HDFS segment fetcher", e);
  }
}
/**
 * Initializes the Hadoop-backed PinotFS: reads the retry settings, loads the Hadoop
 * configuration from the configured path, authenticates, and opens the Hadoop file system.
 *
 * @param config file-system configuration (retry count, retry wait time, Hadoop conf path)
 * @throws RuntimeException if the Hadoop file system cannot be opened
 */
@Override
public void init(Configuration config) {
  try {
    // Retry settings keep their current field values when the keys are absent.
    _retryCount = config.getInt(RETRY, _retryCount);
    _retryWaitMs = config.getInt(RETRY_WAITIME_MS, _retryWaitMs);
    _hadoopConf = getConf(config.getString(HADOOP_CONF_PATH));
    authenticate(_hadoopConf, config);
    _hadoopFS = org.apache.hadoop.fs.FileSystem.get(_hadoopConf);
    LOGGER.info("successfully initialized HadoopPinotFS");
  } catch (IOException e) {
    throw new RuntimeException("Could not initialize HadoopPinotFS", e);
  }
}
/**
 * Creates the base broker request handler, capturing its collaborators and reading the
 * broker-level query settings (broker id, timeout, response limit, query log length) from the
 * configuration with their respective defaults.
 */
public BaseBrokerRequestHandler(Configuration config, RoutingTable routingTable,
    TimeBoundaryService timeBoundaryService, AccessControlFactory accessControlFactory,
    TableQueryQuotaManager tableQueryQuotaManager, BrokerMetrics brokerMetrics) {
  // Broker-level settings derived from the configuration.
  _brokerId = config.getString(CONFIG_OF_BROKER_ID, getDefaultBrokerId());
  _brokerTimeoutMs = config.getLong(CONFIG_OF_BROKER_TIMEOUT_MS, DEFAULT_BROKER_TIMEOUT_MS);
  _queryResponseLimit = config.getInt(CONFIG_OF_BROKER_QUERY_RESPONSE_LIMIT, DEFAULT_BROKER_QUERY_RESPONSE_LIMIT);
  _queryLogLength = config.getInt(CONFIG_OF_BROKER_QUERY_LOG_LENGTH, DEFAULT_BROKER_QUERY_LOG_LENGTH);
  // Collaborators, held as-is.
  _config = config;
  _routingTable = routingTable;
  _timeBoundaryService = timeBoundaryService;
  _accessControlFactory = accessControlFactory;
  _tableQueryQuotaManager = tableQueryQuotaManager;
  _brokerMetrics = brokerMetrics;
  LOGGER.info("Broker Id: {}, timeout: {}ms, query response limit: {}, query log length: {}", _brokerId,
      _brokerTimeoutMs, _queryResponseLimit, _queryLogLength);
}
@Override public SchedulerGroup create(Configuration config, String groupName) { // max available tokens per millisecond equals number of threads (total execution capacity) // we are over provisioning tokens here because its better to keep pipe full rather than empty int maxTokensPerMs = rm.getNumQueryRunnerThreads() + rm.getNumQueryWorkerThreads(); int tokensPerMs = config.getInt(TOKENS_PER_MS_KEY, maxTokensPerMs); int tokenLifetimeMs = config.getInt(TOKEN_LIFETIME_MS_KEY, DEFAULT_TOKEN_LIFETIME_MS); return new TokenSchedulerGroup(groupName, tokensPerMs, tokenLifetimeMs); } };
/**
 * Starts the Pinot broker: checks the state machine is in INIT, transitions through STARTING,
 * starts the request handler and the admin application (on the configured query port), then
 * marks the broker RUNNING.
 *
 * @throws IllegalStateException if the broker is not in the INIT state
 */
public void start() {
  LOGGER.info("Starting Pinot Broker");
  Utils.logVersions();
  Preconditions.checkState(_state.get() == State.INIT);
  _state.set(State.STARTING);
  _brokerRequestHandler.start();
  final int queryPort = _config.getInt(CommonConstants.Helix.KEY_OF_BROKER_QUERY_PORT,
      CommonConstants.Helix.DEFAULT_BROKER_QUERY_PORT);
  _brokerAdminApplication.start(queryPort);
  _state.set(State.RUNNING);
  LOGGER.info("Pinot Broker started");
}
/**
 * Reads star-tree v2 metadata from the given properties: total document count, dimension split
 * order, max leaf records, the dimensions for which star-node creation is skipped, and the
 * aggregation function-column pairs (parsed from their column-name form).
 */
@SuppressWarnings("unchecked")
public StarTreeV2Metadata(Configuration metadataProperties) {
  _numDocs = metadataProperties.getInt(TOTAL_DOCS);
  _maxLeafRecords = metadataProperties.getInt(MAX_LEAF_RECORDS);
  _dimensionsSplitOrder = metadataProperties.getList(DIMENSIONS_SPLIT_ORDER);
  _skipStarNodeCreationForDimensions =
      new HashSet<>(metadataProperties.getList(SKIP_STAR_NODE_CREATION_FOR_DIMENSIONS));
  _functionColumnPairs = new HashSet<>();
  for (Object functionColumnPair : metadataProperties.getList(FUNCTION_COLUMN_PAIRS)) {
    _functionColumnPairs.add(AggregationFunctionColumnPair.fromColumnName((String) functionColumnPair));
  }
}
/**
 * Loads this program's state from the configuration, applying defaults for any missing key:
 * damping factor 0.85, 10 iterations, vertex count 1.
 */
@Override
public void loadState(final Graph graph, final Configuration configuration) {
  vertexCount = configuration.getLong(VERTEX_COUNT, 1L);
  maxIterations = configuration.getInt(MAX_ITERATIONS, 10);
  dampingFactor = configuration.getDouble(DAMPING_FACTOR, 0.85D);
}
/**
 * Loads this program's state from the configuration: required max depth and seed, the optional
 * weight property (default "distance"), and a local message scope over incoming edges that
 * adds each edge's weight to the incoming message.
 */
@Override
public void loadState(final Graph graph, final Configuration configuration) {
  maxDepth = configuration.getInt(MAX_DEPTH);
  seed = configuration.getLong(SEED);
  weightProperty = configuration.getString(WEIGHT_PROPERTY, "distance");
  incidentMessageScope =
      MessageScope.Local.of(__::inE, (msg, edge) -> msg + edge.<Integer>value(weightProperty));
  log.debug("Loaded maxDepth={}", maxDepth);
}
/**
 * Restores program state from the configuration. MAX_DEPTH and SEED are required; the weight
 * property falls back to "distance". Also builds the incident message scope, which accumulates
 * the weight of each incoming edge onto the message value.
 */
@Override
public void loadState(final Graph graph, final Configuration configuration) {
  seed = configuration.getLong(SEED);
  maxDepth = configuration.getInt(MAX_DEPTH);
  weightProperty = configuration.getString(WEIGHT_PROPERTY, "distance");
  incidentMessageScope = MessageScope.Local.of(__::inE,
      (msg, edge) -> msg + edge.<Integer>value(weightProperty));
  log.debug("Loaded maxDepth={}", maxDepth);
}
/**
 * Restores program state from the configuration, using defaults when keys are absent
 * (damping factor 0.85, max iterations 10, vertex count 1).
 */
@Override
public void loadState(final Graph graph, final Configuration configuration) {
  dampingFactor = configuration.getDouble(DAMPING_FACTOR, 0.85D);
  vertexCount = configuration.getLong(VERTEX_COUNT, 1L);
  maxIterations = configuration.getInt(MAX_ITERATIONS, 10);
}
/**
 * Applies pool sizing settings from the given configuration. Each field is overridden only
 * when its key is present; otherwise the current value is retained.
 */
public void init(Configuration cfg) {
  if (cfg.containsKey(CORE_POOL_SIZE_KEY)) {
    _corePoolSize = cfg.getInt(CORE_POOL_SIZE_KEY);
  }
  if (cfg.containsKey(MAX_POOL_SIZE_KEY)) {
    _maxPoolSize = cfg.getInt(MAX_POOL_SIZE_KEY);
  }
  if (cfg.containsKey(IDLE_TIMEOUT_MS_KEY)) {
    _idleTimeoutMs = cfg.getLong(IDLE_TIMEOUT_MS_KEY);
  }
}
/**
 * Rebuilds the node-to-instances mapping and default-server list from the table configuration.
 * No-op when the table configuration is absent.
 */
@SuppressWarnings("unchecked")
private void loadConfig() {
  if (_tableCfg == null) {
    return;
  }
  _nodeToInstancesMap.clear();
  _numNodes = _tableCfg.getInt(NUM_NODES_PER_REPLICA);
  for (int node = 0; node < _numNodes; node++) {
    _nodeToInstancesMap.put(node, _tableCfg.getList(getKey(SERVERS_FOR_NODE, Integer.toString(node))));
  }
  _defaultServers = _tableCfg.getList(getKey(SERVERS_FOR_NODE, DEFAULT_SERVERS_FOR_NODE));
}
/**
 * Initializes HTTPS support for the uploader. When HTTPS is enabled in the configuration,
 * builds the client SSL context and records the controller HTTPS port; otherwise does nothing.
 */
public static void init(Configuration uploaderConfig) {
  final Configuration httpsConfig = uploaderConfig.subset(HTTPS_PROTOCOL);
  if (!httpsConfig.getBoolean(CONFIG_OF_CONTROLLER_HTTPS_ENABLED, false)) {
    return;
  }
  _sslContext = new ClientSSLContextGenerator(httpsConfig.subset(CommonConstants.PREFIX_OF_SSL_SUBSET)).generate();
  _controllerHttpsPort = httpsConfig.getInt(CONFIG_OF_CONTROLLER_HTTPS_PORT);
}