Refine search
if (log.isTraceEnabled()) { log.trace("Treat certificate with principal {} as other node because of it matches one of {}", Arrays.toString(principals), nodesDn); if (log.isTraceEnabled()) { log.trace("Treat certificate with principal {} NOT as other node because we it does not matches one of {}", Arrays.toString(principals), nodesDn); log.debug("Exception parsing certificate using {}", e, this.getClass()); throw new ElasticsearchException(e);
/**
 * Convenience constructor that derives the HTTP status from the throwable:
 * an {@link ElasticsearchException} supplies its own {@code status()},
 * every other throwable is reported as {@code INTERNAL_SERVER_ERROR} (500).
 * Delegates to the (request, status, throwable) constructor.
 *
 * @throws IOException if rendering the throwable as content fails
 */
public XContentThrowableRestResponse(RestRequest request, Throwable t) throws IOException { this(request, ((t instanceof ElasticsearchException) ? ((ElasticsearchException) t).status() : RestStatus.INTERNAL_SERVER_ERROR), t); }
// Serializes this exception into the given builder.
// If unwrapCause() finds a different underlying cause (i.e. this exception is
// only a wrapper), delegate to the generic throwable renderer; otherwise emit
// this exception's own name, message, headers, metadata and cause chain via
// innerToXContent. Returns the same builder to allow call chaining.
@Override public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException { Throwable ex = ExceptionsHelper.unwrapCause(this); if (ex != this) { generateThrowableXContent(builder, params, this); } else { innerToXContent(builder, params, this, getExceptionName(), getMessage(), headers, metadata, getCause()); } return builder; }
/**
 * Returns the root cause of this exception, or multiple causes if different
 * shards caused different exceptions.
 *
 * @return the innermost {@link ElasticsearchException} cause(s); never empty —
 *         falls back to an array containing only {@code this} when no nested
 *         ElasticsearchException exists
 */
public ElasticsearchException[] guessRootCauses() {
    final Throwable cause = getCause();
    // instanceof already evaluates to false for null, so the former explicit
    // null check was redundant and has been dropped.
    if (cause instanceof ElasticsearchException) {
        // Recurse so the deepest ElasticsearchException in the chain wins.
        return ((ElasticsearchException) cause).guessRootCauses();
    }
    return new ElasticsearchException[]{this};
}
log.warn("If you plan to use field masking pls configure "+ConfigConstants.SEARCHGUARD_COMPLIANCE_SALT+" to be a random string of 16 chars length identical on all nodes"); throw new ElasticsearchException(ConfigConstants.SEARCHGUARD_COMPLIANCE_SALT+" must at least contain 16 bytes"); log.warn(ConfigConstants.SEARCHGUARD_COMPLIANCE_SALT+" is greater than 16 bytes. Only the first 16 bytes are used for salting"); log.error("Unable to check if auditlog index {} is part of compliance setup", index, e);
/**
 * Reflectively instantiates the class named by {@code clazz} through its
 * {@code (Settings, Path)} constructor and registers it as a loaded module.
 *
 * @param clazz           fully qualified class name to load
 * @param settings        settings passed to the constructor
 * @param configPath      config path passed to the constructor
 * @param checkEnterprise when true, refuse to load if enterprise modules are disabled
 * @return the new instance, cast to the caller's expected type
 * @throws ElasticsearchException if enterprise modules are disabled or instantiation fails
 */
@SuppressWarnings("unchecked")
public static <T> T instantiateAAA(final String clazz, final Settings settings, final Path configPath, final boolean checkEnterprise) {
    if (checkEnterprise && enterpriseModulesDisabled()) {
        throw new ElasticsearchException("Can not load '{}' because enterprise modules are disabled", clazz);
    }
    try {
        final Class<?> implementation = Class.forName(clazz);
        final T instance = (T) implementation.getConstructor(Settings.class, Path.class).newInstance(settings, configPath);
        addLoadedModule(implementation);
        return instance;
    } catch (final Throwable e) {
        // Deliberately broad: any failure to load an optional module is reported
        // and rethrown wrapped, never silently ignored.
        log.warn("Unable to enable '{}' due to {}", clazz, e.toString());
        if (log.isDebugEnabled()) {
            log.debug("Stacktrace: ", e);
        }
        throw new ElasticsearchException(e);
    }
}
if (logger.isDebugEnabled()) { logger.debug("Loading hunspell dictionary [{}]...", locale); throw new ElasticsearchException(String.format(Locale.ROOT, "Could not find hunspell dictionary [%s]", locale)); throw new ElasticsearchException(String.format(Locale.ROOT, "Missing affix file for hunspell dictionary [%s]", locale)); throw new ElasticsearchException(String.format(Locale.ROOT, "Too many affix files exist for hunspell dictionary [%s]", locale)); logger.error(() -> new ParameterizedMessage("Could not load hunspell dictionary [{}]", locale), e); throw e; } finally {
logger.info("node name [{}], node ID [{}]", NODE_NAME_SETTING.get(tmpSettings), nodeEnvironment.nodeId()); } else { .put(NODE_NAME_SETTING.getKey(), nodeIdToNodeName(nodeEnvironment.nodeId())) .build(); logger.info("node name derived from node ID [{}]; set [{}] to override", nodeEnvironment.nodeId(), NODE_NAME_SETTING.getKey()); logger.info( "version[{}], pid[{}], build[{}/{}/{}/{}], OS[{}/{}/{}], JVM[{}/{}/{}/{}]", Version.displayVersion(Version.CURRENT, Build.CURRENT.isSnapshot()), throw new ElasticsearchException("failed to bind service", ex); } finally { if (!success) {
// Windows console control handler: on CTRL_CLOSE_EVENT (console window close /
// user logoff) attempt a graceful node shutdown via Bootstrap.stop() instead of
// letting the process be killed abruptly. Returning true marks the event as
// handled; any other event code is left to the default handler (false).
// A failed stop is escalated as an ElasticsearchException wrapping the IOException.
@Override public boolean handle(int code) { if (CTRL_CLOSE_EVENT == code) { logger.info("running graceful exit on windows"); try { Bootstrap.stop(); } catch (IOException e) { throw new ElasticsearchException("failed to stop node", e); } return true; } return false; } });
/** Add an incoming, not yet committed cluster state */
public synchronized void addPending(ClusterState state) {
    pendingStates.add(new ClusterStateContext(state));
    if (pendingStates.size() <= maxQueueSize) {
        return;
    }
    // Queue overflow: evict the oldest pending state and, if it was already
    // committed, tell its listener that the state will never be applied.
    final ClusterStateContext evicted = pendingStates.remove(0);
    logger.warn("dropping pending state [{}]. more than [{}] pending states.", evicted, maxQueueSize);
    if (evicted.committed()) {
        evicted.listener.onNewClusterStateFailed(new ElasticsearchException("too many pending states ([{}] pending)", maxQueueSize));
    }
}
} catch (FailedToCommitClusterStateException t) { logger.debug("failed to publish cluster state version [{}] (not enough nodes acknowledged, min master nodes [{}])", newState.version(), electMaster.minimumMasterNodes()); new ElasticsearchException("failed to publish cluster state")); if (sentToApplier == false && processedOrFailed.get() == false) { assert false : "cluster state published locally neither processed nor failed: " + newState; logger.warn("cluster state with version [{}] that is published locally has neither been processed nor failed", newState.version()); return; latch.await(); } catch (InterruptedException e) { logger.debug(() -> new ParameterizedMessage( "interrupted while applying cluster state locally [{}]", clusterChangedEvent.source()), e); Thread.currentThread().interrupt();
if(actionTrace.isTraceEnabled()) { getThreadContext().putHeader("_sg_trace"+System.currentTimeMillis()+"#"+UUID.randomUUID().toString(), Thread.currentThread().getName()+" DIR -> "+transportChannel.getChannelType()+" "+getThreadContext().getHeaders()); log.error("Internal or shard requests ("+task.getAction()+") not allowed from a non-server node for transport type "+transportChannel.getChannelType()); transportChannel.sendResponse(new ElasticsearchSecurityException( "Internal or shard requests not allowed from a non-server node for transport type "+transportChannel.getChannelType())); "No SSL client certificates found for transport type "+transportChannel.getChannelType()+". Search Guard needs the Search Guard SSL plugin to be installed"); auditLog.logSSLException(request, ex, task.getAction(), task); log.error("No SSL client certificates found for transport type "+transportChannel.getChannelType()+". Search Guard needs the Search Guard SSL plugin to be installed"); transportChannel.sendResponse(ex); return; } else { log.error("Request has no proper remote address {}", originalRemoteAddress); transportChannel.sendResponse(new ElasticsearchException("Request has no proper remote address")); return;
final IndexMetaData metaData = clusterState.getMetaData().indices().get(shardId.getIndexName()); logger.debug("{} deleted shard reason [{}]", shardId, reason); canDeleteIndexContents(shardId.getIndex(), indexSettings)) { if (nodeEnv.findAllShardIds(shardId.getIndex()).isEmpty()) { try { } catch (Exception e) { throw new ElasticsearchException("failed to delete unused index after deleting its last shard (" + shardId + ")", e); logger.trace("[{}] still has shard stores, leaving as is", shardId.getIndex());
try { final ShardId shardId = request.getShardId(); logger.trace("{} loading local shard state info", shardId); ShardStateMetaData shardStateMetaData = ShardStateMetaData.FORMAT.loadLatestState(logger, namedXContentRegistry, nodeEnv.availableShardPaths(request.shardId)); if (shardStateMetaData != null) { IndexMetaData metaData = clusterService.state().metaData().index(shardId.getIndex()); if (metaData == null) { nodeEnv.indexPaths(shardId.getIndex())); ElasticsearchException e = new ElasticsearchException("failed to find local IndexMetaData"); e.setShard(request.shardId); throw e; } catch (Exception exception) { final ShardPath finalShardPath = shardPath; logger.trace(() -> new ParameterizedMessage( "{} can't open index for shard [{}] in path [{}]", shardId, logger.debug("{} shard state info found: [{}]", shardId, shardStateMetaData); String allocationId = shardStateMetaData.allocationId != null ? shardStateMetaData.allocationId.getId() : null; return new NodeGatewayStartedShards(clusterService.localNode(), null, false); } catch (Exception e) { throw new ElasticsearchException("failed to load started shards", e);
if(clusterService.state().metaData().index(this.searchguardIndex).mapping("config") != null) { LOGGER.debug("sg index exists and was created before ES 6 (legacy layout)"); retVal.putAll(validate(legacycl.loadLegacy(configTypes.toArray(new String[0]), 5, TimeUnit.SECONDS), configTypes.size())); } else { LOGGER.debug("sg index exists and was created with ES 6 (new layout)"); retVal.putAll(validate(cl.load(configTypes.toArray(new String[0]), 5, TimeUnit.SECONDS), configTypes.size())); LOGGER.debug("sg index not exists (yet)"); retVal.putAll(validate(cl.load(configTypes.toArray(new String[0]), 30, TimeUnit.SECONDS), configTypes.size())); throw new ElasticsearchException(e);
/**
 * Executes a search against {@code indexName} and converts the result.
 * On an {@link ElasticsearchException} the failure is logged and an empty
 * response is returned (graceful degradation); I/O failures while building
 * the request are rethrown as unchecked.
 */
@Override
public QueryOrSearchResponse executeQuery(final String indexName, final String query, final String[] fields, final int maxRows) {
    try {
        final SearchResponse qresponse = proxy.search(buildSearchRequest(indexName, query, fields, maxRows)).actionGet();
        return convertResponse(qresponse);
    } catch (final ElasticsearchException e) {
        // Pass the throwable to the logger so the full stack trace is recorded,
        // not just the (possibly null) message.
        LOGGER.error("Caught ElasticsearchException :: " + e.getMessage(), e);
        return new QueryOrSearchResponse(0, Collections.emptyList());
    } catch (final IOException exception) {
        throw new RuntimeException(exception);
    }
}
/**
 * Return a {@link String} that is the json representation of the provided {@link ToXContent}.
 * Wraps the output into an anonymous object if needed. Allows to control whether the outputted
 * json needs to be pretty printed and human readable.
 *
 * <p>If serialization fails, a second attempt renders an error document containing
 * the failure message and stack trace; if that also fails, an
 * {@link ElasticsearchException} is thrown with the original failure as cause
 * and the secondary failure attached as a suppressed exception.
 */
public static String toString(ToXContent toXContent, boolean pretty, boolean human) {
    try {
        XContentBuilder builder = createBuilder(pretty, human);
        if (toXContent.isFragment()) {
            builder.startObject();
        }
        toXContent.toXContent(builder, ToXContent.EMPTY_PARAMS);
        if (toXContent.isFragment()) {
            builder.endObject();
        }
        return toString(builder);
    } catch (IOException e) {
        try {
            // Best effort: render an error document describing the failure.
            XContentBuilder builder = createBuilder(pretty, human);
            builder.startObject();
            builder.field("error", "error building toString out of XContent: " + e.getMessage());
            builder.field("stack_trace", ExceptionsHelper.stackTrace(e));
            builder.endObject();
            return toString(builder);
        } catch (IOException e2) {
            // Keep the original failure as the cause but do not silently lose
            // the secondary failure — attach it as a suppressed exception.
            e.addSuppressed(e2);
            throw new ElasticsearchException("cannot generate error message for deserialization", e);
        }
    }
}
/**
 * Serializes the given strings as a JSON array literal (e.g. {@code ["a","b"]}).
 * An {@link IOException} from the builder is rethrown wrapped in an
 * {@link ElasticsearchException}.
 */
private static String arrayToParsableString(List<String> array) {
    try {
        final XContentBuilder json = XContentBuilder.builder(XContentType.JSON.xContent());
        json.startArray();
        for (int i = 0; i < array.size(); i++) {
            json.value(array.get(i));
        }
        json.endArray();
        return Strings.toString(json);
    } catch (IOException ex) {
        throw new ElasticsearchException(ex);
    }
}
/**
 * Renders this exception's cause as a nested {@code "caused_by"} xcontent
 * object. Nothing is emitted when there is no cause or when the
 * {@code REST_EXCEPTION_SKIP_CAUSE} parameter requests causes to be skipped.
 */
protected void causeToXContent(XContentBuilder builder, Params params) throws IOException {
    final Throwable cause = getCause();
    if (cause == null) {
        return;
    }
    if (params.paramAsBoolean(REST_EXCEPTION_SKIP_CAUSE, REST_EXCEPTION_SKIP_CAUSE_DEFAULT)) {
        return; // caller explicitly asked to omit the cause chain
    }
    builder.field("caused_by");
    builder.startObject();
    toXContent(builder, params, cause);
    builder.endObject();
}
"for index [" + context.indexShard().shardId().getIndexName() + "]"); final int initialCapacity = nestedHit ? 1024 : Math.min(1024, source.internalSourceRef().length()); BytesStreamOutput streamOutput = new BytesStreamOutput(initialCapacity); XContentBuilder builder = new XContentBuilder(source.sourceContentType().xContent(), streamOutput); if (value != null) { builder.value(value); } else { builder.startObject(); builder.endObject(); throw new ElasticsearchException("Error filtering source", e);