if(aliases != null && aliases.size() > 0) {
/**
 * Returns the number of nodes currently known.
 *
 * @return the node count
 */
public int getSize() {
    final int knownNodes = nodes.size();
    return knownNodes;
}
/**
 * Verifies that saving a configuration value creates the expected Elasticsearch
 * mapping for the "stagemonitor-configuration" index: _all disabled, and both
 * "key" and "value" mapped as keyword fields.
 */
@Test
@ExcludeOnTravis
public void testMapping() throws Exception {
    configurationSource.save("foo", "bar");
    refresh();
    // fetch the mapping Elasticsearch created for the "configuration" type
    final GetMappingsResponse mappings = client.admin().indices().prepareGetMappings("stagemonitor-configuration").setTypes("configuration").get();
    assertEquals(1, mappings.getMappings().size());
    // NOTE(review): compares the raw mapping JSON string byte-for-byte — brittle if ES changes field ordering
    assertEquals("{\"configuration\":{" +
            "\"_all\":{\"enabled\":false}," +
            "\"properties\":{\"configuration\":{\"properties\":{" +
            "\"key\":{\"type\":\"keyword\"}," +
            "\"value\":{\"type\":\"keyword\"}}}}" +
            "}" +
            "}", mappings.getMappings().get("stagemonitor-configuration").get("configuration").source().toString());
}
}
/**
 * Logs how many indices were recovered into the cluster state once the
 * submitted cluster-state update has been processed.
 */
@Override
public void clusterStateProcessed(String source, ClusterState oldState, ClusterState newState) {
    final int recoveredIndices = newState.metaData().indices().size();
    logger.info("recovered [{}] indices into cluster_state", recoveredIndices);
}
});
static int defaultMaxConcurrentSearches(int availableProcessors, ClusterState state) { int numDateNodes = state.getNodes().getDataNodes().size(); // availableProcessors will never be larger than 32, so max defaultMaxConcurrentSearches will never be larger than 49, // but we don't know about about other search requests that are being executed so lets cap at 10 per node int defaultSearchThreadPoolSize = Math.min(ThreadPool.searchThreadPoolSize(availableProcessors), 10); return Math.max(1, numDateNodes * defaultSearchThreadPoolSize); }
/**
 * Serializes the per-index mappings to the stream.
 * Wire format: vint index count, then for each index its name, a vint type
 * count, and each (type name, MappingMetaData) pair in iteration order.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    out.writeVInt(mappings.size());
    for (ObjectObjectCursor<String, ImmutableOpenMap<String, MappingMetaData>> indexEntry : mappings) {
        out.writeString(indexEntry.key);
        out.writeVInt(indexEntry.value.size());
        for (ObjectObjectCursor<String, MappingMetaData> typeEntry : indexEntry.value) {
            out.writeString(typeEntry.key);
            typeEntry.value.writeTo(out);
        }
    }
}
/**
 * Serializes the per-index alias metadata to the stream.
 * Wire format: vint index count, then for each index its name, a vint alias
 * count, and each AliasMetaData in list order.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    out.writeVInt(aliases.size());
    for (ObjectObjectCursor<String, List<AliasMetaData>> entry : aliases) {
        out.writeString(entry.key);
        out.writeVInt(entry.value.size());
        for (AliasMetaData aliasMetaData : entry.value) {
            aliasMetaData.writeTo(out);
        }
    }
}
/**
 * Serializes this routing table: the version, a vint count of index routing
 * tables, then each {@code IndexRoutingTable} in iteration order.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
    out.writeLong(version);
    out.writeVInt(indicesRouting.size());
    for (ObjectCursor<IndexRoutingTable> routingCursor : indicesRouting.values()) {
        routingCursor.value.writeTo(out);
    }
}
/**
 * Serializes the cluster blocks: the global block set first, then a vint count
 * of index entries, each written as the index name followed by its block set.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
    writeBlockSet(global, out);
    out.writeVInt(indicesBlocks.size());
    for (ObjectObjectCursor<String, Set<ClusterBlock>> indexBlocks : indicesBlocks) {
        out.writeString(indexBlocks.key);
        writeBlockSet(indexBlocks.value, out);
    }
}
private static ImmutableOpenMap<String, MappingMetaData> filterFields(ImmutableOpenMap<String, MappingMetaData> mappings, Predicate<String> fieldPredicate) throws IOException { if (fieldPredicate == MapperPlugin.NOOP_FIELD_PREDICATE) { return mappings; } ImmutableOpenMap.Builder<String, MappingMetaData> builder = ImmutableOpenMap.builder(mappings.size()); for (ObjectObjectCursor<String, MappingMetaData> cursor : mappings) { builder.put(cursor.key, filterFields(cursor.value, fieldPredicate)); } return builder.build(); // No types specified means return them all }
/** * Checks if the are replicas with the auto-expand feature that need to be adapted. * Returns a map of updates, which maps the indices to be updated to the desired number of replicas. * The map has the desired number of replicas as key and the indices to update as value, as this allows the result * of this method to be directly applied to RoutingTable.Builder#updateNumberOfReplicas. */ public static Map<Integer, List<String>> getAutoExpandReplicaChanges(MetaData metaData, DiscoveryNodes discoveryNodes) { // used for translating "all" to a number final int dataNodeCount = discoveryNodes.getDataNodes().size(); Map<Integer, List<String>> nrReplicasChanged = new HashMap<>(); for (final IndexMetaData indexMetaData : metaData) { if (indexMetaData.getState() != IndexMetaData.State.CLOSE) { AutoExpandReplicas autoExpandReplicas = SETTING.get(indexMetaData.getSettings()); autoExpandReplicas.getDesiredNumberOfReplicas(dataNodeCount).ifPresent(numberOfReplicas -> { if (numberOfReplicas != indexMetaData.getNumberOfReplicas()) { nrReplicasChanged.computeIfAbsent(numberOfReplicas, ArrayList::new).add(indexMetaData.getIndex().getName()); } }); } } return nrReplicasChanged; } }
/**
 * Builds a synthetic {@link DiskUsage} for the given {@link RoutingNode} by
 * averaging total and free bytes over every node in the usage map.
 *
 * @param node Node to return an averaged DiskUsage object for
 * @param usages Map of nodeId to DiskUsage for all known nodes
 * @return DiskUsage representing given node using the average disk usage
 */
DiskUsage averageUsage(RoutingNode node, ImmutableOpenMap<String, DiskUsage> usages) {
    final int nodeCount = usages.size();
    if (nodeCount == 0) {
        // nothing to average over; report a zeroed-out usage
        return new DiskUsage(node.nodeId(), node.node().getName(), "_na_", 0, 0);
    }
    long totalSum = 0;
    long freeSum = 0;
    for (ObjectCursor<DiskUsage> cursor : usages.values()) {
        totalSum += cursor.value.getTotalBytes();
        freeSum += cursor.value.getFreeBytes();
    }
    // integer division: averages are truncated toward zero, matching long arithmetic
    return new DiskUsage(node.nodeId(), node.node().getName(), "_na_", totalSum / nodeCount, freeSum / nodeCount);
}
/**
 * Serializes the discovery nodes: a boolean-prefixed optional master node id,
 * a vint node count, then each node in iteration order.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
    final boolean hasMaster = masterNodeId != null;
    out.writeBoolean(hasMaster);
    if (hasMaster) {
        out.writeString(masterNodeId);
    }
    out.writeVInt(nodes.size());
    for (DiscoveryNode node : this) {
        node.writeTo(out);
    }
}
@Override public void onMaster() { this.isMaster = true; if (logger.isTraceEnabled()) { logger.trace("I have been elected master, scheduling a ClusterInfoUpdateJob"); } try { // Submit a job that will start after DEFAULT_STARTING_INTERVAL, and reschedule itself after running threadPool.schedule(updateFrequency, executorName(), new SubmitReschedulingClusterInfoUpdatedJob()); if (clusterService.state().getNodes().getDataNodes().size() > 1) { // Submit an info update job to be run immediately threadPool.executor(executorName()).execute(() -> maybeRefresh()); } } catch (EsRejectedExecutionException ex) { if (logger.isDebugEnabled()) { logger.debug("Couldn't schedule cluster info update task - node might be shutting down", ex); } } }
/**
 * Serializes per-index settings, and — when the peer's wire version is 6.4.0
 * or later — the per-index default settings as a second map.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    out.writeVInt(indexToSettings.size());
    for (ObjectObjectCursor<String, Settings> cursor : indexToSettings) {
        out.writeString(cursor.key);
        Settings.writeSettingsToStream(cursor.value, out);
    }
    // default settings were added to the wire format in 6.4.0; older peers don't expect them
    if (out.getVersion().onOrAfter(Version.V_6_4_0)) {
        out.writeVInt(indexToDefaultSettings.size());
        for (ObjectObjectCursor<String, Settings> cursor : indexToDefaultSettings) {
            out.writeString(cursor.key);
            Settings.writeSettingsToStream(cursor.value, out);
        }
    }
}
public void logMinimumMasterNodesWarningIfNecessary(ClusterState oldState, ClusterState newState) { // check if min_master_nodes setting is too low and log warning if (hasTooManyMasterNodes(oldState.nodes()) == false && hasTooManyMasterNodes(newState.nodes())) { logger.warn("value for setting \"{}\" is too low. This can result in data loss! Please set it to at least a quorum of master-" + "eligible nodes (current value: [{}], total number of master-eligible nodes used for publishing in this round: [{}])", ElectMasterService.DISCOVERY_ZEN_MINIMUM_MASTER_NODES_SETTING.getKey(), minimumMasterNodes(), newState.getNodes().getMasterNodes().size()); } }
/**
 * Serializes the shard store statuses and collection failures.
 * Wire format: vint index count; per index its name, a vint shard count, and
 * per shard the shard id (int), a vint status count and each StoreStatus;
 * finally a vint failure count and each Failure.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
    super.writeTo(out);
    out.writeVInt(storeStatuses.size());
    for (ObjectObjectCursor<String, ImmutableOpenIntMap<List<StoreStatus>>> indexShards : storeStatuses) {
        out.writeString(indexShards.key);
        out.writeVInt(indexShards.value.size());
        for (IntObjectCursor<List<StoreStatus>> shardStatusesEntry : indexShards.value) {
            out.writeInt(shardStatusesEntry.key);
            out.writeVInt(shardStatusesEntry.value.size());
            for (StoreStatus storeStatus : shardStatusesEntry.value) {
                storeStatus.writeTo(out);
            }
        }
    }
    out.writeVInt(failures.size());
    for (Failure failure : failures) {
        failure.writeTo(out);
    }
}
/**
 * Finalizes a completed snapshot in its repository: collects the failed shard
 * statuses, writes the final SnapshotInfo via the repository, removes the
 * snapshot entry from the cluster state, and logs the outcome.
 */
@Override
protected void doRun() {
    final Snapshot snapshot = entry.snapshot();
    final Repository repository = repositoriesService.repository(snapshot.getRepository());
    logger.trace("[{}] finalizing snapshot in repository, state: [{}], failure[{}]", snapshot, entry.state(), failure);
    ArrayList<SnapshotShardFailure> shardFailures = new ArrayList<>();
    // gather the shards whose snapshot did not complete successfully
    for (ObjectObjectCursor<ShardId, ShardSnapshotStatus> shardStatus : entry.shards()) {
        ShardId shardId = shardStatus.key;
        ShardSnapshotStatus status = shardStatus.value;
        if (status.state().failed()) {
            shardFailures.add(new SnapshotShardFailure(status.nodeId(), shardId, status.reason()));
        }
    }
    // write the final snapshot metadata to the repository
    SnapshotInfo snapshotInfo = repository.finalizeSnapshot(
        snapshot.getSnapshotId(), entry.indices(), entry.startTime(), failure, entry.shards().size(),
        Collections.unmodifiableList(shardFailures), entry.getRepositoryStateId(), entry.includeGlobalState());
    removeSnapshotFromClusterState(snapshot, snapshotInfo, null);
    logger.info("snapshot [{}] completed with state [{}]", snapshot, snapshotInfo.state());
}
/**
 * Serializes the in-progress restore entries.
 * Wire format per entry: uuid (only on wire versions >= 6.6.0), snapshot,
 * state byte, vint-counted index names, then vint-counted (ShardId,
 * ShardRestoreStatus) pairs.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
    out.writeVInt(entries.size());
    for (ObjectCursor<Entry> v : entries.values()) {
        Entry entry = v.value;
        // the entry uuid was added to the wire format in 6.6.0
        if (out.getVersion().onOrAfter(Version.V_6_6_0)) {
            out.writeString(entry.uuid);
        }
        entry.snapshot().writeTo(out);
        out.writeByte(entry.state().value());
        out.writeVInt(entry.indices().size());
        for (String index : entry.indices()) {
            out.writeString(index);
        }
        out.writeVInt(entry.shards().size());
        for (ObjectObjectCursor<ShardId, ShardRestoreStatus> shardEntry : entry.shards()) {
            shardEntry.key.writeTo(out);
            shardEntry.value.writeTo(out);
        }
    }
}
/**
 * Serializes the segment statistics: the segment count, the fixed-order memory
 * counters, the max unsafe auto-id timestamp, then the vint-counted map of
 * file-extension -> size-in-bytes entries.
 * NOTE: the write order here is the wire contract and must match the reader.
 */
@Override
public void writeTo(StreamOutput out) throws IOException {
    out.writeVLong(count);
    out.writeLong(memoryInBytes);
    out.writeLong(termsMemoryInBytes);
    out.writeLong(storedFieldsMemoryInBytes);
    out.writeLong(termVectorsMemoryInBytes);
    out.writeLong(normsMemoryInBytes);
    out.writeLong(pointsMemoryInBytes);
    out.writeLong(docValuesMemoryInBytes);
    out.writeLong(indexWriterMemoryInBytes);
    out.writeLong(versionMapMemoryInBytes);
    out.writeLong(bitsetMemoryInBytes);
    out.writeLong(maxUnsafeAutoIdTimestamp);
    out.writeVInt(fileSizes.size());
    for (ObjectObjectCursor<String, Long> entry : fileSizes) {
        out.writeString(entry.key);
        out.writeLong(entry.value.longValue());
    }
}
}