private List<ServerAddress> getServerAddressForReplica(DBObject item) {
    String definition = item.get("host").toString();
    if (definition.contains("/")) {
        definition = definition.substring(definition.indexOf("/") + 1);
    }
    if (logger.isDebugEnabled()) {
        logger.debug("getServerAddressForReplica - definition: {}", definition);
    }
    List<ServerAddress> servers = new ArrayList<ServerAddress>();
    for (String server : definition.split(",")) {
        servers.add(new ServerAddress(server));
    }
    return servers;
}
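// A minimal sketch of how getServerAddressForReplica might be exercised from within
// the same class; the replica-set string and the BasicDBObject construction are
// assumptions for illustration, not taken from the original source.
DBObject replicaStatus = new BasicDBObject("host", "rs0/host1:27017,host2:27017");
List<ServerAddress> servers = getServerAddressForReplica(replicaStatus);
// servers now holds host1:27017 and host2:27017; the "rs0/" prefix has been stripped.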
private void checkBulkProcessorAvailability() {
    while (!isBulkProcessorAvailable()) {
        try {
            if (logger.isDebugEnabled()) {
                logger.debug("Waiting for bulk queue to empty...");
            }
            Thread.sleep(2000);
        } catch (InterruptedException e) {
            logger.warn("checkBulkProcessorAvailability interrupted", e);
        }
    }
}
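// isBulkProcessorAvailable() is referenced above but not shown; a minimal sketch follows,
// assuming the class tracks in-flight bulk requests in an AtomicInteger named onGoingBulks
// against a configured maxConcurrentBulk limit. Both field names are assumptions.
private boolean isBulkProcessorAvailable() {
    // The processor is considered available once the number of in-flight bulk
    // requests drops below the configured limit.
    return onGoingBulks.get() < maxConcurrentBulk;
}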
if (logger.isDebugEnabled()) {
    logger.debug("Set explicit attachment mapping.");
}
@Override
public void onFailure(Throwable e) {
    if (logger.isDebugEnabled()) {
        logger.debug("Failed to stop reindexing for " + toIndex + ".", e);
    }
}
});
@Override
public void onRejection(Throwable t) {
    run = false;
    if (threadPool.logger.isDebugEnabled()) {
        threadPool.logger.debug("scheduled task [{}] was rejected on thread pool [{}]", t, runnable, executor);
    }
}
protected void sleep(final Throwable t) {
    final long waitTime = random.nextInt(1000) + 500L;
    if (logger.isDebugEnabled()) {
        logger.debug("Waiting for {}ms and retrying... The cause is: {}", waitTime, t.getMessage());
    }
    try {
        Thread.sleep(waitTime);
    } catch (final InterruptedException e1) {
        // ignore
    }
}
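// A minimal sketch of how sleep(Throwable) might be used in a bounded retry loop;
// maxRetries and doRequest() are hypothetical names, not taken from the original source.
for (int attempt = 0; attempt < maxRetries; attempt++) {
    try {
        doRequest();
        break;
    } catch (final Exception e) {
        // Back off for 500-1500ms before the next attempt.
        sleep(e);
    }
}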
private void logException(Exception e) {
    if (logger.isDebugEnabled()) {
        logger.debug("Error writing to Graphite", e);
    } else {
        logger.warn("Error writing to Graphite: {}", e.getMessage());
    }
}
}
private InternalSearchHits readHits(ChannelBufferStreamInput in) throws IOException {
    if (logger.isDebugEnabled()) {
        logger.debug("Reading hits...");
    }
    return readSearchHits(in);
}
@Override
public void onResponse(final String token) {
    if (logger.isDebugEnabled()) {
        logger.debug("Token " + token + " is generated.");
    }
    ResponseUtil.send(request, channel, RestStatus.OK, "token", token);
}
public boolean callback(long dwCtrlType) {
    int event = (int) dwCtrlType;
    if (logger.isDebugEnabled()) {
        logger.debug("console control handler receives event [{}@{}]", event, dwCtrlType);
    }
    return handler.handle(event);
}
}
public void start(final DiscoveryNode masterNode, String reason) {
    synchronized (masterNodeMutex) {
        if (logger.isDebugEnabled()) {
            logger.debug("[master] starting fault detection against master [{}], reason [{}]", masterNode, reason);
        }
        innerStart(masterNode);
    }
}
private void queueMessage(String message) {
    try {
        if (logger.isDebugEnabled()) {
            logger.debug("About to add a message...");
        }
        byte[] data = message.getBytes();
        currentRequest.add(data, 0, data.length, false);
        if (logger.isDebugEnabled()) {
            logger.debug("Current size: {}", currentRequest.numberOfActions());
        }
        processBulkIfNeeded(false);
    } catch (Exception e) {
        logger.error("Unable to build request", e);
    }
}
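// processBulkIfNeeded(boolean) is referenced above but not shown; a minimal sketch follows,
// assuming currentRequest is a BulkRequestBuilder and bulkSize is a configured threshold.
// The force flag and the field names are assumptions, not taken from the original source.
private void processBulkIfNeeded(boolean force) {
    if (force || currentRequest.numberOfActions() >= bulkSize) {
        // Fire the accumulated bulk request and start a fresh builder.
        currentRequest.execute(new ActionListener<BulkResponse>() {
            @Override
            public void onResponse(BulkResponse response) {
                if (response.hasFailures()) {
                    logger.warn("Bulk request had failures: {}", response.buildFailureMessage());
                }
            }

            @Override
            public void onFailure(Throwable e) {
                logger.error("Bulk request failed", e);
            }
        });
        currentRequest = client.prepareBulk();
    }
}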
void onFetchFailure(Throwable t, ShardFetchSearchRequest fetchSearchRequest, int shardIndex, SearchShardTarget shardTarget, AtomicInteger counter) {
    if (logger.isDebugEnabled()) {
        logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
    }
    this.addShardFailure(shardIndex, shardTarget, t);
    successfulOps.decrementAndGet();
    if (counter.decrementAndGet() == 0) {
        finishHim();
    }
}
private InternalAggregations readAggregations(ChannelBufferStreamInput in) throws IOException {
    if (logger.isDebugEnabled()) {
        logger.debug("Reading aggregations...");
    }
    InternalAggregations aggregations = null;
    if (in.readBoolean()) {
        aggregations = InternalAggregations.readAggregations(in);
    }
    return aggregations;
}
private Suggest readSuggestions(ChannelBufferStreamInput in) throws IOException {
    if (logger.isDebugEnabled()) {
        logger.debug("Reading suggest...");
    }
    Suggest suggest = null;
    if (in.readBoolean()) {
        suggest = Suggest.readSuggest(Suggest.Fields.SUGGEST, in);
    }
    return suggest;
}
@Override
public void onFailure(Throwable t) {
    ReduceSearchPhaseException failure = new ReduceSearchPhaseException("merge", "", t, buildShardFailures());
    if (logger.isDebugEnabled()) {
        logger.debug("failed to reduce search", failure);
    }
    super.onFailure(failure);
}
});
@Override
protected Map<String, ExternalResult> externalDoReorder(Set<String> keySet) {
    if (logger.isDebugEnabled()) {
        logger.debug("External Neo4j booster for : " + keySet);
        logger.debug("Call: " + getEndpoint());
    }
    return getReorderedResults(getExternalResults(keySet));
}
private void finishHim() {
    try {
        innerFinishHim();
    } catch (Throwable e) {
        ReduceSearchPhaseException failure = new ReduceSearchPhaseException("fetch", "", e, buildShardFailures());
        if (logger.isDebugEnabled()) {
            logger.debug("failed to reduce search", failure);
        }
        listener.onFailure(failure);
    }
}
@Override
public void onResponse(final IndexResponse response) {
    if (logger.isDebugEnabled()) {
        logger.debug("Response: {}/{}/{}, Created: {}, Version: {}",
                response.getIndex(), response.getType(), response.getId(),
                response.isCreated(), response.getVersion());
    }
    countDown();
}
/** Add an IndexRequest to the bulk processor. */
private void esIndex(String index, String type, String id, byte[] json) throws Exception {
    if (logger.isDebugEnabled()) {
        logger.debug("Indexing in ES {}, {}, {}", index, type, id);
    }
    if (logger.isTraceEnabled()) {
        // Decode the byte[] so the trace shows the document body, not the array reference.
        logger.trace("Json indexed: {}", new String(json, "UTF-8"));
    }
    bulkProcessor.add(client.prepareIndex(index, type, id).setSource(json).request());
}
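// bulkProcessor above is assumed to be an org.elasticsearch.action.bulk.BulkProcessor.
// A minimal construction sketch follows; the thresholds and the listener body are
// assumptions for illustration, not taken from the original source.
BulkProcessor bulkProcessor = BulkProcessor.builder(client, new BulkProcessor.Listener() {
    @Override
    public void beforeBulk(long executionId, BulkRequest request) {
        logger.debug("Executing bulk [{}] with {} actions", executionId, request.numberOfActions());
    }

    @Override
    public void afterBulk(long executionId, BulkRequest request, BulkResponse response) {
        if (response.hasFailures()) {
            logger.warn("Bulk [{}] had failures: {}", executionId, response.buildFailureMessage());
        }
    }

    @Override
    public void afterBulk(long executionId, BulkRequest request, Throwable failure) {
        logger.error("Bulk [" + executionId + "] failed", failure);
    }
})
        .setBulkActions(1000)
        .setFlushInterval(TimeValue.timeValueSeconds(5))
        .build();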