private boolean getOrReplaceAllIndices(final Object request, final IndicesProvider provider, boolean allowEmptyIndices) {
    if (log.isTraceEnabled()) {
        log.trace("getOrReplaceAllIndices() for " + request.getClass());
    }
    // ...
    Index concreteIndex = pmr.getConcreteIndex();
    if (concreteIndex != null && (pmr.indices() == null || pmr.indices().length == 0)) {
        String[] newIndices = provider.provide(new String[]{concreteIndex.getName()}, request, true);
        if (checkIndices(request, newIndices, true, allowEmptyIndices) == false) {
            return false;
        }
    }
    // ...
    log.warn("snapshot repository '" + restoreRequest.repository() + "', snapshot '" + restoreRequest.snapshot() + "' not found");
    provider.provide(new String[]{"*"}, request, false);
    // ...
public Index build() {
    return new Index(name, uuid);
}
@Override
public boolean equals(final Object other) {
    if (this == other) {
        return true;
    }
    if (other == null || getClass() != other.getClass()) {
        return false;
    }
    @SuppressWarnings("unchecked")
    Tombstone that = (Tombstone) other;
    return index.equals(that.index) && deleteDateInMillis == that.deleteDateInMillis;
}
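Since equals() compares only index and deleteDateInMillis, a consistent hashCode() has to be derived from the same two fields. A minimal sketch of the matching override, assuming those are the only identity fields of Tombstone:

@Override
public int hashCode() {
    // Hash exactly the fields compared in equals(): index and deleteDateInMillis.
    int result = index.hashCode();
    result = 31 * result + Long.hashCode(deleteDateInMillis);
    return result;
}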
public IndexMetaData index(Index index) {
    IndexMetaData metaData = index(index.getName());
    if (metaData != null && metaData.getIndexUUID().equals(index.getUUID())) {
        return metaData;
    }
    return null;
}
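The UUID check is what distinguishes a live index from one that was deleted and recreated under the same name: a name match alone does not prove the caller's Index handle is still current. A hypothetical caller (variable names assumed for illustration) treats the null return as a stale handle:

// Hypothetical usage: staleIndex is an Index handle captured from an earlier cluster state.
IndexMetaData metaData = clusterState.metaData().index(staleIndex);
if (metaData == null) {
    // Either the index is gone, or it was deleted and recreated under the same
    // name with a new UUID; in both cases the old handle must not be reused.
}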
/**
 * Cleans dangling indices if they are already allocated on the provided meta data.
 */
void cleanupAllocatedDangledIndices(MetaData metaData) {
    for (Index index : danglingIndices.keySet()) {
        final IndexMetaData indexMetaData = metaData.index(index);
        if (indexMetaData != null && indexMetaData.getIndex().getName().equals(index.getName())) {
            if (indexMetaData.getIndex().getUUID().equals(index.getUUID()) == false) {
                logger.warn("[{}] can not be imported as a dangling index, as there is already another index " +
                        "with the same name but a different uuid. local index will be ignored (but not deleted)", index);
            } else {
                logger.debug("[{}] no longer dangling (created), removing from dangling list", index);
            }
            danglingIndices.remove(index);
        }
    }
}
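Note that the loop above removes entries from danglingIndices while iterating its key set, which is only safe on a concurrent map. A sketch of the field declaration this pattern presumes (assumed from the iteration style, not shown in the snippet):

// Assumed declaration: remove() during key-set iteration is safe on a concurrent
// map, but would throw ConcurrentModificationException on a plain HashMap.
private final Map<Index, IndexMetaData> danglingIndices = ConcurrentCollections.newConcurrentMap();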
assert newIndexMetaData.getIndex().equals(index()) :
        "index mismatch: expected " + index() + " but was " + newIndexMetaData.getIndex();
// ...
logger.warn(() -> new ParameterizedMessage("[{}] failed to apply mappings", index()), e);
throw e;
// ...
CompressedXContent incomingMappingSource = newIndexMetaData.mapping(mappingType).source();
// ...
if (logger.isDebugEnabled() && incomingMappingSource.compressed().length < 512) {
    logger.debug("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, incomingMappingSource.string());
} else if (logger.isTraceEnabled()) {
    logger.trace("[{}] {} mapping [{}], source [{}]", index(), op, mappingType, incomingMappingSource.string());
}
private Resolved resolveIndexPatterns(final String... requestedPatterns) {
    if (log.isTraceEnabled()) {
        log.trace("resolve requestedPatterns: " + Arrays.toString(requestedPatterns));
    }
    // ...
    final SortedMap<String, AliasOrIndex> lookup = state.metaData().getAliasAndIndexLookup();
    final Set<String> aliases = lookup.entrySet().stream()
            .filter(e -> e.getValue().isAlias())
            .map(e -> e.getKey())
            .collect(Collectors.toSet());
    // ...
    try {
        _indices = new ArrayList<>(Arrays.asList(resolver.concreteIndexNames(state,
                IndicesOptions.fromOptions(false, true, true, false), requestedPatterns)));
        if (log.isDebugEnabled()) {
            log.debug("Resolved pattern {} to {}", requestedPatterns, _indices);
        }
    // ...
    Set<String> doubleIndices = lookup.get(al).getIndices().stream()
            .map(a -> a.getIndex().getName())
            .collect(Collectors.toSet());
    _indices.removeAll(doubleIndices);
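The four positional booleans passed to IndicesOptions.fromOptions above are easy to misread, so here is the same call with each flag spelled out:

// IndicesOptions.fromOptions(ignoreUnavailable, allowNoIndices,
//                            expandWildcardsOpen, expandWildcardsClosed)
IndicesOptions options = IndicesOptions.fromOptions(
        false,  // ignoreUnavailable: fail on concrete names that do not exist
        true,   // allowNoIndices: resolving to an empty set is not an error
        true,   // expandWildcardsOpen: wildcards match open indices
        false); // expandWildcardsClosed: wildcards skip closed indices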
@Override
public ClusterState execute(ClusterState currentState) {
    if (currentState.blocks().disableStatePersistence()) {
        return currentState;
    }
    MetaData.Builder metaData = MetaData.builder(currentState.metaData());
    ClusterBlocks.Builder blocks = ClusterBlocks.builder().blocks(currentState.blocks());
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
    final Version minIndexCompatibilityVersion = currentState.getNodes().getMaxNodeVersion()
            .minimumIndexCompatibilityVersion();
    // ...
    if (currentState.metaData().hasIndex(indexMetaData.getIndex().getName())) {
        continue;
    }
    if (currentState.metaData().hasAlias(indexMetaData.getIndex().getName())) {
        logger.warn("ignoring dangled index [{}] on node [{}] due to an existing alias with the same name",
                indexMetaData.getIndex(), request.fromNode);
        continue;
    }
    // ...
    } catch (Exception ex) {
        logger.warn(() -> new ParameterizedMessage("found dangled index [{}] on node [{}]. This index cannot be " +
                "upgraded to the latest version, adding as closed", indexMetaData.getIndex(), request.fromNode), ex);
        upgradedIndexMetaData = IndexMetaData.builder(indexMetaData).state(IndexMetaData.State.CLOSE)
        // ...
final Set<String> excludeIndexPathIds = new HashSet<>(metaData.indices().size() + danglingIndices.size());
for (ObjectCursor<IndexMetaData> cursor : metaData.indices().values()) {
    excludeIndexPathIds.add(cursor.value.getIndex().getUUID());
}
// ...
final List<IndexMetaData> indexMetaDataList = metaStateService.loadIndicesStates(excludeIndexPathIds::contains);
Map<Index, IndexMetaData> newIndices = new HashMap<>(indexMetaDataList.size());
final IndexGraveyard graveyard = metaData.indexGraveyard();
for (IndexMetaData indexMetaData : indexMetaDataList) {
    if (metaData.hasIndex(indexMetaData.getIndex().getName())) {
        logger.warn("[{}] can not be imported as a dangling index, as index with same name already exists in cluster metadata",
                indexMetaData.getIndex());
    } else if (graveyard.containsIndex(indexMetaData.getIndex())) {
        logger.warn("[{}] can not be imported as a dangling index, as an index with the same name and UUID exist in the " +
                "index tombstones. This situation is likely caused by copying over the data directory for an index " +
                "that was previously deleted.", indexMetaData.getIndex());
    } else {
        logger.info("[{}] dangling index exists on local file system, but not in cluster metadata, " +
                "auto import to cluster state", indexMetaData.getIndex());
        newIndices.put(indexMetaData.getIndex(), indexMetaData);
    }
}
public ClusterState closeIndices(ClusterState currentState, final Index[] indices, String indicesAsString) {
    Set<IndexMetaData> indicesToClose = new HashSet<>();
    for (Index index : indices) {
        final IndexMetaData indexMetaData = currentState.metaData().getIndexSafe(index);
        if (indexMetaData.getState() != IndexMetaData.State.CLOSE) {
            indicesToClose.add(indexMetaData);
        }
    }
    // ...
    logger.info("closing indices [{}]", indicesAsString);
    MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData());
    ClusterBlocks.Builder blocksBuilder = ClusterBlocks.builder()
            .blocks(currentState.blocks());
    for (IndexMetaData openIndexMetadata : indicesToClose) {
        final String indexName = openIndexMetadata.getIndex().getName();
        mdBuilder.put(IndexMetaData.builder(openIndexMetadata).state(IndexMetaData.State.CLOSE));
        blocksBuilder.addIndexBlock(indexName, INDEX_CLOSED_BLOCK);
    }
    RoutingTable.Builder rtBuilder = RoutingTable.builder(currentState.routingTable());
    for (IndexMetaData index : indicesToClose) {
        rtBuilder.remove(index.getIndex().getName());
    }
final MetaData meta = currentState.metaData();
final Set<IndexMetaData> metaDatas = indices.stream().map(i -> meta.getIndexSafe(i)).collect(toSet());
RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
MetaData.Builder metaDataBuilder = MetaData.builder(meta);
ClusterBlocks.Builder clusterBlocksBuilder = ClusterBlocks.builder().blocks(currentState.blocks());
// ...
String indexName = index.getName();
logger.info("{} deleting index", index);
routingTableBuilder.remove(indexName);
clusterBlocksBuilder.removeIndexBlocks(indexName);
// ...
logger.trace("{} tombstones purged from the cluster state. Previous tombstone size: {}. Current tombstone size: {}.",
        graveyardBuilder.getNumPurged(), previousGraveyardSize, currentGraveyard.getTombstones().size());
private IndexSearcherWrapper loadFlsDlsIndexSearcherWrapper(final IndexService indexService,
        final ComplianceIndexingOperationListener ciol, final ComplianceConfig complianceConfig) {
    try {
        IndexSearcherWrapper flsdlsWrapper = (IndexSearcherWrapper) dlsFlsConstructor
                .newInstance(indexService, settings, Objects.requireNonNull(adminDns), Objects.requireNonNull(cs),
                        Objects.requireNonNull(auditLog), Objects.requireNonNull(ciol), Objects.requireNonNull(complianceConfig));
        if (log.isDebugEnabled()) {
            log.debug("FLS/DLS enabled for index {}", indexService.index().getName());
        }
        return flsdlsWrapper;
    } catch (Exception ex) {
        throw new RuntimeException("Failed to enable FLS/DLS", ex);
    }
}
private RecoveryResponse recover(final StartRecoveryRequest request) throws IOException {
    final IndexService indexService = indicesService.indexServiceSafe(request.shardId().getIndex());
    final IndexShard shard = indexService.getShard(request.shardId().id());

    final ShardRouting routingEntry = shard.routingEntry();

    if (routingEntry.primary() == false || routingEntry.active() == false) {
        throw new DelayRecoveryException("source shard [" + routingEntry + "] is not an active primary");
    }

    if (request.isPrimaryRelocation() && (routingEntry.relocating() == false ||
            routingEntry.relocatingNodeId().equals(request.targetNode().getId()) == false)) {
        logger.debug("delaying recovery of {} as source shard is not marked yet as relocating to {}",
                request.shardId(), request.targetNode());
        throw new DelayRecoveryException("source shard is not marked yet as relocating to [" + request.targetNode() + "]");
    }

    RecoverySourceHandler handler = ongoingRecoveries.addNewRecovery(request, shard);
    logger.trace("[{}][{}] starting recovery to {}", request.shardId().getIndex().getName(),
            request.shardId().id(), request.targetNode());
    try {
        return handler.recoverToTarget();
    } finally {
        ongoingRecoveries.remove(shard, handler);
    }
}
final ClusterState previousState = event.previousState();
final ClusterState state = event.state();
final String localNodeId = state.nodes().getLocalNodeId();
assert localNodeId != null;
// ...
if (logger.isDebugEnabled()) {
    logger.debug("[{}] cleaning index, no longer part of the metadata", index);
}
// ...
    indexSettings = indexService.getIndexSettings();
    indicesService.removeIndex(index, DELETED, "index no longer part of the metadata");
} else if (previousState.metaData().hasIndex(index.getName())) {
    final IndexMetaData metaData = previousState.metaData().index(index);
    indexSettings = new IndexSettings(metaData, settings);
    indicesService.deleteUnassignedIndex("deleted index was not assigned to local node", metaData, state);
}
@Override
public ClusterState execute(ClusterState currentState) {
    RoutingTable.Builder routingTableBuilder = RoutingTable.builder(currentState.routingTable());
    MetaData.Builder metaDataBuilder = MetaData.builder(currentState.metaData());
    for (int i = 0; i < request.indices().length; i++) {
        Index index = request.indices()[i];
        actualIndices[i] = index.getName();
        final IndexMetaData metaData = currentState.metaData().getIndexSafe(index);
        if (metaData.getState() == IndexMetaData.State.OPEN) {
            openIndices.add(index);
        } else {
            // ...
        }
    }
    // ...
    logger.info("updating number_of_replicas to [{}] for indices {}", updatedNumberOfReplicas, actualIndices);
    // ...
    IndexMetaData indexMetaData = metaDataBuilder.getSafe(index);
    // dynamic settings, applied to open indices:
    Settings.Builder updates = Settings.builder();
    Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings());
    if (indexScopedSettings.updateDynamicSettings(openSettings, indexSettings, updates, index.getName())) {
        if (preserveExisting) {
            indexSettings.put(indexMetaData.getSettings());
        }
    // ... the same pattern repeats in a separate scope for closed indices:
    Settings.Builder updates = Settings.builder();
    Settings.Builder indexSettings = Settings.builder().put(indexMetaData.getSettings());
    if (indexScopedSettings.updateSettings(closedSettings, indexSettings, updates, index.getName())) {
        if (preserveExisting) {
            indexSettings.put(indexMetaData.getSettings());
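The preserveExisting branch works because Settings.Builder.put lets later values overwrite earlier ones: the requested updates are applied first and the index's original settings are re-put on top, so any key the index already defines keeps its old value. A minimal standalone sketch (setting key chosen for illustration):

// Later put() calls win for duplicate keys, which is what makes
// "apply updates, then re-put the originals" preserve existing values.
Settings existing = Settings.builder().put("index.refresh_interval", "5s").build();
Settings merged = Settings.builder()
        .put("index.refresh_interval", "1s") // requested update
        .put(existing)                       // re-apply originals; "5s" wins
        .build();
assert "5s".equals(merged.get("index.refresh_interval"));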
log.debug("Handle complianceConfig="+complianceConfig+"/dlsFlsAvailable: "+dlsFlsAvailable+"/auditLog="+auditLog.getClass()+" for onIndexModule() of index "+indexModule.getIndex().getName()); if (dlsFlsAvailable) { if(complianceConfig.writeHistoryEnabledForIndex(indexModule.getIndex().getName())) { ciol = ReflectionHelper.instantiateComplianceListener(complianceConfig, Objects.requireNonNull(auditLog)); indexModule.addIndexOperationListener(ciol);
private void createIndices(final ClusterState state) {
    RoutingNode localRoutingNode = state.getRoutingNodes().node(state.nodes().getLocalNodeId());
    if (localRoutingNode == null) {
        return;
    }
    // ...
    final IndexMetaData indexMetaData = state.metaData().index(index);
    logger.debug("[{}] creating index", index);
    // ...
    if (indexService.updateMapping(null, indexMetaData) && sendRefreshMapping) {
        nodeMappingRefreshAction.nodeMappingRefresh(state.nodes().getMasterNode(),
                new NodeMappingRefreshAction.NodeMappingRefreshRequest(indexMetaData.getIndex().getName(),
                        indexMetaData.getIndexUUID(), state.nodes().getLocalNodeId())
        );
    }
/**
 * Deletes an index that is not assigned to this node. This method cleans up all disk folders relating to the index
 * but does not deal with in-memory structures. For those call {@link #removeIndex(Index, IndexRemovalReason, String)}
 */
@Override
public void deleteUnassignedIndex(String reason, IndexMetaData metaData, ClusterState clusterState) {
    if (nodeEnv.hasNodeFile()) {
        String indexName = metaData.getIndex().getName();
        try {
            if (clusterState.metaData().hasIndex(indexName)) {
                final IndexMetaData index = clusterState.metaData().index(indexName);
                throw new IllegalStateException("Can't delete unassigned index store for [" + indexName + "] - it's still part of " +
                        "the cluster state [" + index.getIndexUUID() + "] [" + metaData.getIndexUUID() + "]");
            }
            deleteIndexStore(reason, metaData, clusterState);
        } catch (Exception e) {
            logger.warn(() -> new ParameterizedMessage("[{}] failed to delete unassigned index (reason [{}])",
                    metaData.getIndex(), reason), e);
        }
    }
}
for (ShardUpgradeResult result : shardUpgradeResults) {
    successfulShards++;
    String index = result.getShardId().getIndex().getName();
    if (result.primary()) {
        Integer count = successfulPrimaryShards.get(index);
        // ...
    }
}
// ...
MetaData metaData = clusterState.metaData();
for (Map.Entry<String, Tuple<Version, org.apache.lucene.util.Version>> versionEntry : versions.entrySet()) {
    String index = versionEntry.getKey();
    Integer primaryCount = successfulPrimaryShards.get(index);
    int expectedPrimaryCount = metaData.index(index).getNumberOfShards();
    // Guard against null before unboxing: primaryCount is absent when no primary succeeded.
    if (primaryCount != null && primaryCount == expectedPrimaryCount) {
        updatedVersions.put(index, new Tuple<>(versionEntry.getValue().v1(), versionEntry.getValue().v2().toString()));
    } else {
        logger.warn("Not updating settings for the index [{}] because upgrade of some primary shards failed - " +
                "expected[{}], received[{}]", index, expectedPrimaryCount, primaryCount == null ? 0 : primaryCount);
    }
}