private long limit(long bytes, String label) {
    long newUsed;
    // Otherwise, check the addition and commit the addition, looping if
    // there are conflicts. May result in additional logging, but it's
    // trace logging and shouldn't be counted on for additions.
    long currentUsed;
    do {
        currentUsed = this.used.get();
        newUsed = currentUsed + bytes;
        long newUsedWithOverhead = (long) (newUsed * overheadConstant);
        if (logger.isTraceEnabled()) {
            logger.trace("[{}] Adding [{}][{}] to used bytes [new used: [{}], limit: {} [{}], estimate: {} [{}]]",
                    this.name,
                    new ByteSizeValue(bytes), label, new ByteSizeValue(newUsed),
                    memoryBytesLimit, new ByteSizeValue(memoryBytesLimit),
                    newUsedWithOverhead, new ByteSizeValue(newUsedWithOverhead));
        }
        if (memoryBytesLimit > 0 && newUsedWithOverhead > memoryBytesLimit) {
            logger.warn("[{}] New used memory {} [{}] for data of [{}] would be larger than configured breaker: {} [{}], breaking",
                    this.name,
                    newUsedWithOverhead, new ByteSizeValue(newUsedWithOverhead), label,
                    memoryBytesLimit, new ByteSizeValue(memoryBytesLimit));
            circuitBreak(label, newUsedWithOverhead);
        }
        // Attempt to set the new used value, but make sure it hasn't changed
        // underneath us; if it has, keep trying until we are able to set it.
    } while (!this.used.compareAndSet(currentUsed, newUsed));
    return newUsed;
}
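The method above is a lock-free accounting loop: read the current total, compute the prospective total with overhead, break the circuit if it would exceed the limit, and commit via compare-and-set, retrying on contention. A minimal, self-contained sketch of the same pattern (the MemoryAccountant class and its constructor parameters are illustrative, not part of the Elasticsearch source):

import java.util.concurrent.atomic.AtomicLong;

// Sketch: lock-free limit accounting via a compare-and-set retry loop.
final class MemoryAccountant {
    private final AtomicLong used = new AtomicLong();
    private final long limitBytes;
    private final double overhead;

    MemoryAccountant(long limitBytes, double overhead) {
        this.limitBytes = limitBytes;
        this.overhead = overhead;
    }

    long add(long bytes) {
        long current;
        long updated;
        do {
            current = used.get();
            updated = current + bytes;
            long estimate = (long) (updated * overhead);
            if (limitBytes > 0 && estimate > limitBytes) {
                // Throwing here means the CAS below never commits on a break,
                // just as circuitBreak(...) aborts the loop above.
                throw new IllegalStateException(
                        "estimate " + estimate + " bytes would exceed limit " + limitBytes);
            }
            // If another thread changed 'used' since the read above, retry.
        } while (!used.compareAndSet(current, updated));
        return updated;
    }
}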
/** Shard calls this on each indexing/delete op */
public void bytesWritten(int bytes) {
    long totalBytes = bytesWrittenSinceCheck.addAndGet(bytes);
    assert totalBytes >= 0;
    while (totalBytes > indexingBuffer.getBytes() / 30) {
        if (runLock.tryLock()) {
            try {
                // Must pull this again because it may have changed since we first checked:
                totalBytes = bytesWrittenSinceCheck.get();
                if (totalBytes > indexingBuffer.getBytes() / 30) {
                    bytesWrittenSinceCheck.addAndGet(-totalBytes);
                    // NOTE: this is only an approximate check, because bytes written is to the translog,
                    // vs indexing memory buffer which is typically smaller but can be larger in extreme
                    // cases (many unique terms). This logic is here only as a safety against thread
                    // starvation or too infrequent checking, to ensure we are still checking periodically,
                    // in proportion to bytes processed by indexing:
                    runUnlocked();
                }
            } finally {
                runLock.unlock();
            }
            // Must get it again since other threads could have increased it while we were in runUnlocked:
            totalBytes = bytesWrittenSinceCheck.get();
        } else {
            // Another thread beat us to it: let them do all the work, yay!
            break;
        }
    }
}
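The tryLock idiom above elects one writer thread to run the expensive periodic check while concurrent writers simply continue. A stripped-down sketch of that election pattern (the PeriodicChecker class and its doPeriodicWork hook are hypothetical; the hook plays the role of runUnlocked()):

import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReentrantLock;

// Sketch: many writers bump a counter; whichever thread wins tryLock runs the check.
final class PeriodicChecker {
    private final AtomicLong pendingBytes = new AtomicLong();
    private final ReentrantLock runLock = new ReentrantLock();
    private final long checkEveryBytes;

    PeriodicChecker(long checkEveryBytes) {
        this.checkEveryBytes = checkEveryBytes;
    }

    void bytesWritten(long bytes) {
        long total = pendingBytes.addAndGet(bytes);
        while (total > checkEveryBytes) {
            if (runLock.tryLock()) {
                try {
                    // Re-read under the lock: another thread may have drained it already.
                    total = pendingBytes.get();
                    if (total > checkEveryBytes) {
                        pendingBytes.addAndGet(-total);
                        doPeriodicWork(); // hypothetical hook
                    }
                } finally {
                    runLock.unlock();
                }
                total = pendingBytes.get();
            } else {
                break; // the lock holder will do the work on our behalf
            }
        }
    }

    private void doPeriodicWork() {
        // expensive check would go here
    }
}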
void setChunkSize(ByteSizeValue chunkSize) { // only settable for tests
    if (chunkSize.bytesAsInt() <= 0) {
        throw new IllegalArgumentException("chunkSize must be > 0");
    }
    this.chunkSize = chunkSize;
}
int bulkActions = XContentMapValues.nodeIntegerValue(bulkSettings.get(ACTIONS_FIELD), DEFAULT_BULK_ACTIONS);
bulkBuilder.bulkActions(bulkActions);
String size = XContentMapValues.nodeStringValue(bulkSettings.get(SIZE_FIELD), DEFAULT_BULK_SIZE.toString());
bulkBuilder.bulkSize(ByteSizeValue.parseBytesSizeValue(size));
bulkBuilder.concurrentRequests(XContentMapValues.nodeIntegerValue(
        bulkSettings.get(CONCURRENT_REQUESTS_FIELD),
        EsExecutors.boundedNumberOfProcessors(ImmutableSettings.EMPTY)));
public FsBlobStore(Settings settings, Path path) throws IOException {
    this.path = path;
    this.readOnly = settings.getAsBoolean("readonly", false);
    if (!this.readOnly) {
        Files.createDirectories(path);
    }
    this.bufferSizeInBytes = (int) settings.getAsBytesSize("repositories.fs.buffer_size",
            new ByteSizeValue(100, ByteSizeUnit.KB)).getBytes();
}
int skippedOps = 0;
int totalSentOps = 0;
final AtomicLong targetLocalCheckpoint = new AtomicLong(SequenceNumbers.UNASSIGNED_SEQ_NO);
final List<Translog.Operation> operations = new ArrayList<>();
final LocalCheckpointTracker requiredOpsTracker = new LocalCheckpointTracker(endingSeqNo, requiredSeqNoRangeStart - 1);
// ...
logger.trace("no translog operations to send");
// ...
final long targetCheckpoint = recoveryTarget.indexTranslogOperations(
        operations, expectedTotalOps, maxSeenAutoIdTimestamp, maxSeqNoOfUpdatesOrDeletes);
targetLocalCheckpoint.set(targetCheckpoint);
// ...
logger.trace("sent batch of [{}][{}] (total: [{}]) translog operations", ops, new ByteSizeValue(size), expectedTotalOps);
ops = 0;
size = 0;
// ...
logger.trace("sent final batch of [{}][{}] (total: [{}]) translog operations", ops, new ByteSizeValue(size), expectedTotalOps);
return new SendSnapshotResult(targetLocalCheckpoint.get(), totalSentOps);
/**
 * Method used to trip the breaker, delegates to the parent to determine
 * whether to trip the breaker or not
 */
@Override
public void circuitBreak(String fieldName, long bytesNeeded) {
    this.trippedCount.incrementAndGet();
    final String message = "[" + this.name + "] Data too large, data for [" + fieldName + "]" +
            " would be [" + bytesNeeded + "/" + new ByteSizeValue(bytesNeeded) + "]" +
            ", which is larger than the limit of [" +
            memoryBytesLimit + "/" + new ByteSizeValue(memoryBytesLimit) + "]";
    logger.debug("{}", message);
    throw new CircuitBreakingException(message, bytesNeeded, memoryBytesLimit);
}
/**
 * Checks whether the parent breaker has been tripped
 */
public void checkParentLimit(String label) throws CircuitBreakingException {
    long totalUsed = 0;
    for (CircuitBreaker breaker : this.breakers.values()) {
        totalUsed += (breaker.getUsed() * breaker.getOverhead());
    }
    long parentLimit = this.parentSettings.getLimit();
    if (totalUsed > parentLimit) {
        this.parentTripCount.incrementAndGet();
        final StringBuilder message = new StringBuilder("[parent] Data too large, data for [" + label + "]" +
                " would be [" + totalUsed + "/" + new ByteSizeValue(totalUsed) + "]" +
                ", which is larger than the limit of [" +
                parentLimit + "/" + new ByteSizeValue(parentLimit) + "]");
        message.append(", usages [");
        message.append(String.join(", ",
                this.breakers.entrySet().stream()
                        .map(e -> {
                            final CircuitBreaker breaker = e.getValue();
                            final long breakerUsed = (long) (breaker.getUsed() * breaker.getOverhead());
                            return e.getKey() + "=" + breakerUsed + "/" + new ByteSizeValue(breakerUsed);
                        })
                        .collect(Collectors.toList())));
        message.append("]");
        throw new CircuitBreakingException(message.toString(), totalUsed, parentLimit);
    }
}
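The parent breaker does not track memory itself; it sums each child's used bytes scaled by that child's overhead constant and trips when the sum crosses the shared limit. A simplified sketch of that aggregation (plain maps stand in for the breaker registry; the class and method names are illustrative):

import java.util.Map;

// Sketch: trip a shared "parent" limit from per-child usage and overhead factors.
final class ParentLimitCheck {
    static void checkParentLimit(Map<String, Long> usedBytesByBreaker,
                                 Map<String, Double> overheadByBreaker,
                                 long parentLimitBytes) {
        long totalUsed = 0;
        for (Map.Entry<String, Long> e : usedBytesByBreaker.entrySet()) {
            // Scale each child's raw usage by its configured overhead constant.
            totalUsed += (long) (e.getValue() * overheadByBreaker.getOrDefault(e.getKey(), 1.0));
        }
        if (totalUsed > parentLimitBytes) {
            throw new IllegalStateException(
                    "[parent] would use " + totalUsed + " bytes, limit is " + parentLimitBytes);
        }
    }
}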
private long noLimit(long bytes, String label) {
    long newUsed;
    newUsed = this.used.addAndGet(bytes);
    if (logger.isTraceEnabled()) {
        logger.trace("[{}] Adding [{}][{}] to used bytes [new used: [{}], limit: [-1b]]",
                this.name, new ByteSizeValue(bytes), label, new ByteSizeValue(newUsed));
    }
    return newUsed;
}
Map<String, String> headers = new HashMap<>();
long headerSize = 0;
long maxSize = maxHeaderSize.getBytes();
ThreadContext threadContext = threadPool.getThreadContext();
for (String key : taskHeaders) {
    // ...
}
Task task = request.createTask(taskIdGenerator.incrementAndGet(), type, action, request.getParentTask(), headers);
if (task == null) {
    return null;
}
// ...
if (logger.isTraceEnabled()) {
    logger.trace("register {} [{}] [{}] [{}]", task.getId(), type, action, task.getDescription());
}
logger.info("Elasticsearch Client for version {}.x connected to a node running version {}", compatibleVersion(), getVersion()); } catch (ElasticsearchStatusException e) { logger.debug("got an error while trying to connect to elasticsearch cluster"); throw new IOException(e); } catch (Exception e) { logger.warn("failed to create elasticsearch client, disabling crawler..."); throw e; (request, bulkListener) -> client.bulkAsync(request, bulkListener); threadPool = new ThreadPool(Settings.builder().put("node.name", "fscrawler-client").build()); bulkProcessor = new BulkProcessor.Builder(bulkConsumer, new DebugListener(logger), threadPool) .setBulkActions(settings.getElasticsearch().getBulkSize()) .setFlushInterval(TimeValue.timeValueMillis(settings.getElasticsearch().getFlushInterval().millis())) .setBulkSize(new ByteSizeValue(settings.getElasticsearch().getByteSize().getBytes())) .build();
@Override
public void start() throws IOException {
    if (client != null) {
        // The client has already been initialized. Let's skip this again
        return;
    }

    try {
        // Create an elasticsearch client
        client = new RestHighLevelClient(buildRestClient(settings.getElasticsearch()));
        checkVersion();
        logger.info("Elasticsearch Client for version {}.x connected to a node running version {}",
                compatibleVersion(), getVersion());
    } catch (Exception e) {
        logger.warn("failed to create elasticsearch client, disabling crawler...");
        throw e;
    }

    if (settings.getElasticsearch().getPipeline() != null) {
        // Check that the pipeline exists
        if (!isExistingPipeline(settings.getElasticsearch().getPipeline())) {
            throw new RuntimeException("You defined pipeline:" + settings.getElasticsearch().getPipeline() +
                    ", but it does not exist.");
        }
    }

    BiConsumer<BulkRequest, ActionListener<BulkResponse>> bulkConsumer =
            (request, bulkListener) -> client.bulkAsync(request, RequestOptions.DEFAULT, bulkListener);
    bulkProcessor = BulkProcessor.builder(bulkConsumer, new DebugListener(logger))
            .setBulkActions(settings.getElasticsearch().getBulkSize())
            .setFlushInterval(TimeValue.timeValueMillis(settings.getElasticsearch().getFlushInterval().millis()))
            .setBulkSize(new ByteSizeValue(settings.getElasticsearch().getByteSize().getBytes()))
            .build();
}
private void setRequestBreakerLimit(ByteSizeValue newRequestMax, Double newRequestOverhead) {
    BreakerSettings newRequestSettings = new BreakerSettings(CircuitBreaker.REQUEST, newRequestMax.getBytes(),
            newRequestOverhead, HierarchyCircuitBreakerService.this.requestSettings.getType());
    registerBreaker(newRequestSettings);
    HierarchyCircuitBreakerService.this.requestSettings = newRequestSettings;
    logger.info("Updated breaker settings request: {}", newRequestSettings);
}
/**
 * Warn about the given disk usage if the low or high watermark has been passed
 */
private void warnAboutDiskIfNeeded(DiskUsage usage) {
    // Check absolute disk values
    if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdFloodStage().getBytes()) {
        logger.warn("flood stage disk watermark [{}] exceeded on {}, all indices on this node will be marked read-only",
                diskThresholdSettings.getFreeBytesThresholdFloodStage(), usage);
    } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdHigh().getBytes()) {
        logger.warn("high disk watermark [{}] exceeded on {}, shards will be relocated away from this node",
                diskThresholdSettings.getFreeBytesThresholdHigh(), usage);
    } else if (usage.getFreeBytes() < diskThresholdSettings.getFreeBytesThresholdLow().getBytes()) {
        logger.info("low disk watermark [{}] exceeded on {}, replicas will not be assigned to this node",
                diskThresholdSettings.getFreeBytesThresholdLow(), usage);
    }

    // Check percentage disk values
    if (usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdFloodStage()) {
        logger.warn("flood stage disk watermark [{}] exceeded on {}, all indices on this node will be marked read-only",
                Strings.format1Decimals(100.0 - diskThresholdSettings.getFreeDiskThresholdFloodStage(), "%"), usage);
    } else if (usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdHigh()) {
        logger.warn("high disk watermark [{}] exceeded on {}, shards will be relocated away from this node",
                Strings.format1Decimals(100.0 - diskThresholdSettings.getFreeDiskThresholdHigh(), "%"), usage);
    } else if (usage.getFreeDiskAsPercentage() < diskThresholdSettings.getFreeDiskThresholdLow()) {
        logger.info("low disk watermark [{}] exceeded on {}, replicas will not be assigned to this node",
                Strings.format1Decimals(100.0 - diskThresholdSettings.getFreeDiskThresholdLow(), "%"), usage);
    }
}
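Note how each block checks the most severe threshold first and chains with else-if, so a node short on disk logs exactly one absolute and one percentage message per pass. A tiny sketch of the same cascade over plain numbers (the class, method, and thresholds are illustrative):

// Sketch: ordered watermark cascade — most severe first, at most one match.
final class Watermarks {
    static String classify(long freeBytes, long floodBytes, long highBytes, long lowBytes) {
        if (freeBytes < floodBytes) {
            return "flood";
        } else if (freeBytes < highBytes) {
            return "high";
        } else if (freeBytes < lowBytes) {
            return "low";
        }
        return "ok";
    }

    public static void main(String[] args) {
        // 5 GiB free vs. flood=1 GiB, high=10 GiB, low=15 GiB -> "high"
        System.out.println(classify(5L << 30, 1L << 30, 10L << 30, 15L << 30));
    }
}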
IndexingMemoryController(Settings settings, ThreadPool threadPool, Iterable<IndexShard> indexServices) {
    this.indexShards = indexServices;

    ByteSizeValue indexingBuffer = INDEX_BUFFER_SIZE_SETTING.get(settings);

    String indexingBufferSetting = settings.get(INDEX_BUFFER_SIZE_SETTING.getKey());
    // null means we used the default (10%)
    if (indexingBufferSetting == null || indexingBufferSetting.endsWith("%")) {
        // We only apply the min/max when % value was used for the index buffer:
        ByteSizeValue minIndexingBuffer = MIN_INDEX_BUFFER_SIZE_SETTING.get(settings);
        ByteSizeValue maxIndexingBuffer = MAX_INDEX_BUFFER_SIZE_SETTING.get(settings);
        if (indexingBuffer.getBytes() < minIndexingBuffer.getBytes()) {
            indexingBuffer = minIndexingBuffer;
        }
        if (maxIndexingBuffer.getBytes() != -1 && indexingBuffer.getBytes() > maxIndexingBuffer.getBytes()) {
            indexingBuffer = maxIndexingBuffer;
        }
    }
    this.indexingBuffer = indexingBuffer;

    this.inactiveTime = SHARD_INACTIVE_TIME_SETTING.get(settings);
    // we need to have this relatively small to free up heap quickly enough
    this.interval = SHARD_MEMORY_INTERVAL_TIME_SETTING.get(settings);

    this.statusChecker = new ShardsIndicesStatusChecker();

    logger.debug("using indexing buffer size [{}] with {} [{}], {} [{}]",
            this.indexingBuffer,
            SHARD_INACTIVE_TIME_SETTING.getKey(), this.inactiveTime,
            SHARD_MEMORY_INTERVAL_TIME_SETTING.getKey(), this.interval);
    this.scheduler = scheduleTask(threadPool);

    // Need to save this so we can later launch async "write indexing buffer to disk" on shards:
    this.threadPool = threadPool;
}
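The constructor clamps the buffer only when it was expressed as a percentage of heap; an absolute value is taken verbatim. A self-contained sketch of that clamping rule (the class, method, and parameter names are illustrative):

// Sketch: clamp a %-of-heap indexing buffer between absolute min/max bounds.
final class BufferSizing {
    static long effectiveBufferBytes(long heapBytes, double fraction, long minBytes, long maxBytes) {
        long buffer = (long) (heapBytes * fraction);
        if (buffer < minBytes) {
            buffer = minBytes;
        }
        // A max of -1 means "no upper bound", mirroring the check above.
        if (maxBytes != -1 && buffer > maxBytes) {
            buffer = maxBytes;
        }
        return buffer;
    }

    public static void main(String[] args) {
        // 10% of a 512 MiB heap is ~51 MiB; with min=48 MiB and no max it is kept as-is.
        System.out.println(effectiveBufferBytes(512L << 20, 0.10, 48L << 20, -1));
    }
}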
public IndicesQueryCache(Settings settings) {
    final ByteSizeValue size = INDICES_CACHE_QUERY_SIZE_SETTING.get(settings);
    final int count = INDICES_CACHE_QUERY_COUNT_SETTING.get(settings);
    logger.debug("using [node] query cache with size [{}] max filter count [{}]", size, count);
    if (INDICES_QUERIES_CACHE_ALL_SEGMENTS_SETTING.get(settings)) {
        cache = new ElasticsearchLRUQueryCache(count, size.getBytes(), context -> true);
    } else {
        cache = new ElasticsearchLRUQueryCache(count, size.getBytes());
    }
    sharedRamBytesUsed = 0;
}
public RecoverySettings(Settings settings, ClusterSettings clusterSettings) {
    this.retryDelayStateSync = INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING.get(settings);
    // doesn't have to be fast as nodes are reconnected every 10s by default (see InternalClusterService.ReconnectToNodes)
    // and we want to give the master time to remove a faulty node
    this.retryDelayNetwork = INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING.get(settings);

    this.internalActionTimeout = INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING.get(settings);
    this.internalActionLongTimeout = INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING.get(settings);

    this.activityTimeout = INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING.get(settings);
    this.maxBytesPerSec = INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING.get(settings);
    if (maxBytesPerSec.getBytes() <= 0) {
        rateLimiter = null;
    } else {
        rateLimiter = new SimpleRateLimiter(maxBytesPerSec.getMbFrac());
    }

    logger.debug("using max_bytes_per_sec[{}]", maxBytesPerSec);

    clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_MAX_BYTES_PER_SEC_SETTING, this::setMaxBytesPerSec);
    clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_STATE_SYNC_SETTING, this::setRetryDelayStateSync);
    clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_RETRY_DELAY_NETWORK_SETTING, this::setRetryDelayNetwork);
    clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_ACTION_TIMEOUT_SETTING, this::setInternalActionTimeout);
    clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_INTERNAL_LONG_ACTION_TIMEOUT_SETTING, this::setInternalActionLongTimeout);
    clusterSettings.addSettingsUpdateConsumer(INDICES_RECOVERY_ACTIVITY_TIMEOUT_SETTING, this::setActivityTimeout);
}
public HierarchyCircuitBreakerService(Settings settings, ClusterSettings clusterSettings) {
    super(settings);
    this.fielddataSettings = new BreakerSettings(CircuitBreaker.FIELDDATA,
            FIELDDATA_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(),
            FIELDDATA_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings),
            FIELDDATA_CIRCUIT_BREAKER_TYPE_SETTING.get(settings)
    );

    this.inFlightRequestsSettings = new BreakerSettings(CircuitBreaker.IN_FLIGHT_REQUESTS,
            IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(),
            IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings),
            IN_FLIGHT_REQUESTS_CIRCUIT_BREAKER_TYPE_SETTING.get(settings)
    );

    this.requestSettings = new BreakerSettings(CircuitBreaker.REQUEST,
            REQUEST_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(),
            REQUEST_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings),
            REQUEST_CIRCUIT_BREAKER_TYPE_SETTING.get(settings)
    );

    this.accountingSettings = new BreakerSettings(CircuitBreaker.ACCOUNTING,
            ACCOUNTING_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(),
            ACCOUNTING_CIRCUIT_BREAKER_OVERHEAD_SETTING.get(settings),
            ACCOUNTING_CIRCUIT_BREAKER_TYPE_SETTING.get(settings)
    );

    this.parentSettings = new BreakerSettings(CircuitBreaker.PARENT,
            TOTAL_CIRCUIT_BREAKER_LIMIT_SETTING.get(settings).getBytes(), 1.0,
            CircuitBreaker.Type.PARENT);

    if (logger.isTraceEnabled()) {
        logger.trace("parent circuit breaker with settings {}", this.parentSettings);
    }
    // ...
}
/**
 * Creates a new ThreadContext instance
 * @param settings the settings to read the default request headers from
 */
public ThreadContext(Settings settings) {
    Settings headers = DEFAULT_HEADERS_SETTING.get(settings);
    if (headers == null) {
        this.defaultHeader = Collections.emptyMap();
    } else {
        Map<String, String> defaultHeader = new HashMap<>();
        for (String key : headers.names()) {
            defaultHeader.put(key, headers.get(key));
        }
        this.defaultHeader = Collections.unmodifiableMap(defaultHeader);
    }
    threadLocal = new ContextThreadLocal();
    this.maxWarningHeaderCount = SETTING_HTTP_MAX_WARNING_HEADER_COUNT.get(settings);
    this.maxWarningHeaderSize = SETTING_HTTP_MAX_WARNING_HEADER_SIZE.get(settings).getBytes();
}
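The constructor protects the default header map by copying configured entries into an unmodifiable view and falling back to an immutable empty map when nothing is set. A small sketch of that defensive-copy pattern in isolation (the class and method names are illustrative):

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

// Sketch: defensive copy of configured defaults into an immutable map.
final class DefaultHeaders {
    static Map<String, String> copyOf(Map<String, String> configured) {
        if (configured == null || configured.isEmpty()) {
            return Collections.emptyMap();
        }
        return Collections.unmodifiableMap(new HashMap<>(configured));
    }
}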