/**
 * Looks up the metadata entry for the repository with the given name.
 *
 * @param name name of the repository to look up
 * @return the matching {@link RepositoryMetaData}, or {@code null} if no such repository is registered
 */
public RepositoryMetaData repository(String name) {
    for (RepositoryMetaData candidate : repositories) {
        if (name.equals(candidate.name()) == false) {
            continue;
        }
        return candidate;
    }
    // no repository with that name
    return null;
}
/** Human-readable representation: {@code BlobStoreRepository[[<name>], [<blob store>]]}. */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder("BlobStoreRepository[");
    sb.append('[').append(metadata.name()).append("], [").append(blobStore()).append(']');
    return sb.append(']').toString();
}
private long listBlobsToGetLatestIndexId() throws IOException { Map<String, BlobMetaData> blobs = blobContainer().listBlobsByPrefix(INDEX_FILE_PREFIX); long latest = RepositoryData.EMPTY_REPO_GEN; if (blobs.isEmpty()) { // no snapshot index blobs have been written yet return latest; } for (final BlobMetaData blobMetaData : blobs.values()) { final String blobName = blobMetaData.name(); try { final long curr = Long.parseLong(blobName.substring(INDEX_FILE_PREFIX.length())); latest = Math.max(latest, curr); } catch (NumberFormatException nfe) { // the index- blob wasn't of the format index-N where N is a number, // no idea what this blob is but it doesn't belong in the repository! logger.debug("[{}] Unknown blob in the repository: {}", metadata.name(), blobName); } } return latest; }
/**
 * Serializes a single repository entry as an XContent object keyed by the
 * repository name, containing its type and settings.
 *
 * @param repository repository metadata to serialize
 * @param builder XContent builder to write into
 * @param params serialization parameters
 * @throws IOException if writing to the builder fails
 */
public static void toXContent(RepositoryMetaData repository, XContentBuilder builder, ToXContent.Params params) throws IOException {
    builder.startObject(repository.name());
    {
        builder.field("type", repository.type());
        builder.startObject("settings");
        repository.settings().toXContent(builder, params);
        builder.endObject();
    }
    builder.endObject();
}
}
/** Reads the snapshot info blob for the given snapshot id from the repository root. */
@Override
public SnapshotInfo getSnapshotInfo(final SnapshotId snapshotId) {
    final String uuid = snapshotId.getUUID();
    try {
        return snapshotFormat.read(blobContainer(), uuid);
    } catch (NoSuchFileException ex) {
        // The snapshot blob is gone entirely — surface as "missing" rather than a generic failure.
        throw new SnapshotMissingException(metadata.name(), snapshotId, ex);
    } catch (IOException | NotXContentException ex) {
        throw new SnapshotException(metadata.name(), snapshotId, "failed to get snapshots", ex);
    }
}
/** Reads the cluster-wide (global) metadata blob written for the given snapshot. */
@Override
public MetaData getSnapshotGlobalMetaData(final SnapshotId snapshotId) {
    final String uuid = snapshotId.getUUID();
    try {
        return globalMetaDataFormat.read(blobContainer(), uuid);
    } catch (NoSuchFileException ex) {
        // No global metadata blob means the snapshot itself is missing.
        throw new SnapshotMissingException(metadata.name(), snapshotId, ex);
    } catch (IOException ex) {
        throw new SnapshotException(metadata.name(), snapshotId, "failed to read global metadata", ex);
    }
}
/**
 * Creates a repository holder from its metadata and starts it.
 *
 * @param repositoryMetaData metadata describing the repository to create
 * @param factories registered repository factories, keyed by repository type
 * @return the started {@link Repository}
 * @throws RepositoryException if the type is unknown or creation/start fails
 */
private Repository createRepository(RepositoryMetaData repositoryMetaData, Map<String, Repository.Factory> factories) {
    final String type = repositoryMetaData.type();
    final String name = repositoryMetaData.name();
    logger.debug("creating repository [{}][{}]", type, name);
    final Repository.Factory factory = factories.get(type);
    if (factory == null) {
        throw new RepositoryException(name, "repository type [" + type + "] does not exist");
    }
    try {
        final Repository repository = factory.create(repositoryMetaData, factories::get);
        repository.start();
        return repository;
    } catch (Exception e) {
        // Log with full context before rethrowing, preserving the original cause.
        logger.warn(new ParameterizedMessage("failed to create repository [{}][{}]", type, name), e);
        throw new RepositoryException(name, "failed to create repository", e);
    }
}
/**
 * Loads the shard snapshot descriptor blob for this shard.
 *
 * @return the deserialized {@link BlobStoreIndexShardSnapshot}
 * @throws SnapshotException if the shard snapshot file cannot be read
 */
BlobStoreIndexShardSnapshot loadSnapshot() {
    final String uuid = snapshotId.getUUID();
    try {
        return indexShardSnapshotFormat.read(blobContainer, uuid);
    } catch (IOException ex) {
        throw new SnapshotException(metadata.name(), snapshotId, "failed to read shard snapshot file for " + shardId, ex);
    }
}
/** Closes the given repository, logging which one is being shut down. */
private void closeRepository(Repository repository) {
    final RepositoryMetaData repoMeta = repository.getMetadata();
    logger.debug("closing repository [{}][{}]", repoMeta.type(), repoMeta.name());
    repository.close();
}
@Override public ClusterState execute(ClusterState currentState) { ensureRepositoryNotInUse(currentState, request.name); MetaData metaData = currentState.metaData(); MetaData.Builder mdBuilder = MetaData.builder(currentState.metaData()); RepositoriesMetaData repositories = metaData.custom(RepositoriesMetaData.TYPE); if (repositories != null && repositories.repositories().size() > 0) { List<RepositoryMetaData> repositoriesMetaData = new ArrayList<>(repositories.repositories().size()); boolean changed = false; for (RepositoryMetaData repositoryMetaData : repositories.repositories()) { if (Regex.simpleMatch(request.name, repositoryMetaData.name())) { logger.info("delete repository [{}]", repositoryMetaData.name()); changed = true; } else { repositoriesMetaData.add(repositoryMetaData); } } if (changed) { repositories = new RepositoriesMetaData(repositoriesMetaData); mdBuilder.putCustom(RepositoriesMetaData.TYPE, repositories); return ClusterState.builder(currentState).metaData(mdBuilder).build(); } } if (Regex.isMatchAllPattern(request.name)) { // we use a wildcard so we don't barf if it's not present. return currentState; } throw new RepositoryMissingException(request.name); }
/**
 * Maintains a single lazily-created instance of {@link BlobStore}.
 *
 * Uses double-checked locking: the common case reads the already-created store
 * without taking the lock; only the first caller(s) synchronize to create it.
 *
 * @return the shared blob store instance, creating it on first access
 * @throws RepositoryException if the repository is not started or creation fails
 */
protected BlobStore blobStore() {
    assertSnapshotOrGenericThread();
    BlobStore store = blobStore.get();
    if (store == null) {
        synchronized (lock) {
            // re-check under the lock: another thread may have created the store meanwhile
            store = blobStore.get();
            if (store == null) {
                if (lifecycle.started() == false) {
                    // creating the blob store is only valid while the repository is running
                    throw new RepositoryException(metadata.name(), "repository is not in started state");
                }
                try {
                    store = createBlobStore();
                } catch (RepositoryException e) {
                    // already the right exception type — don't wrap it again
                    throw e;
                } catch (Exception e) {
                    throw new RepositoryException(metadata.name(), "cannot create blob store" , e);
                }
                // publish only after successful creation
                blobStore.set(store);
            }
        }
    }
    return store;
}
// NOTE(review): fragment — these statements appear to come from separate branches of an
// enclosing method that writes a new index-N generation blob; the surrounding control
// flow is not visible here, so the ordering below is not the real execution order.
throw new RepositoryException(metadata.name(), "concurrent modification of the index-N file, expected current generation [" + repositoryStateId + "], actual current generation [" + currentGen + "] - possibly due to simultaneous snapshot deletion requests");
// write the new index-N blob atomically (failIfAlreadyExists = true guards against races)
logger.debug("Repository [{}] writing new index generational blob [{}]", metadata.name(), indexBlob);
writeAtomic(indexBlob, snapshotsBytes, true);
genBytes = bStream.bytes();
// update the index.latest pointer to the new generation (overwrite allowed)
logger.debug("Repository [{}] updating index.latest with generation [{}]", metadata.name(), newGen);
writeAtomic(INDEX_LATEST_BLOB, genBytes, false);
/**
 * Renders the repositories response as a table with one row per repository
 * (name and type columns).
 *
 * @param req the REST request (used to build the table header)
 * @param getRepositoriesResponse response listing the registered repositories
 * @return the populated table
 */
private Table buildTable(RestRequest req, GetRepositoriesResponse getRepositoriesResponse) {
    final Table table = getTableWithHeader(req);
    for (RepositoryMetaData repo : getRepositoriesResponse.repositories()) {
        table.startRow();
        table.addCell(repo.name());
        table.addCell(repo.type());
        table.endRow();
    }
    return table;
}
}
// NOTE(review): fragment — these warn+throw pairs appear to be bodies of separate
// validation branches (missing location / location outside path.repo / path.repo unset);
// the enclosing if/else structure is not visible here.
logger.warn("the repository location is missing, it should point to a shared file system location" + " that is available on all master and data nodes");
throw new RepositoryException(metadata.name(), "missing location");
logger.warn("The specified location [{}] doesn't start with any " + "repository paths specified by the path.repo setting: [{}] ", location, environment.repoFiles());
throw new RepositoryException(metadata.name(), "location [" + location + "] doesn't match any of the locations specified by path.repo");
} else {
    logger.warn("The specified location [{}] should start with a repository path specified by" + " the path.repo setting, but the path.repo setting was not set on this node", location);
    throw new RepositoryException(metadata.name(), "location [" + location + "] doesn't match any of the locations specified by path.repo because this setting is empty");
@Override public void initializeSnapshot(SnapshotId snapshotId, List<IndexId> indices, MetaData clusterMetaData) { if (isReadOnly()) { throw new RepositoryException(metadata.name(), "cannot create snapshot in a readonly repository"); } try { final String snapshotName = snapshotId.getName(); // check if the snapshot name already exists in the repository final RepositoryData repositoryData = getRepositoryData(); if (repositoryData.getAllSnapshotIds().stream().anyMatch(s -> s.getName().equals(snapshotName))) { throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists"); } if (snapshotFormat.exists(blobContainer(), snapshotId.getUUID())) { throw new InvalidSnapshotNameException(metadata.name(), snapshotId.getName(), "snapshot with the same name already exists"); } // Write Global MetaData globalMetaDataFormat.write(clusterMetaData, blobContainer(), snapshotId.getUUID()); // write the index metadata for each index in the snapshot for (IndexId index : indices) { final IndexMetaData indexMetaData = clusterMetaData.index(index.getName()); final BlobPath indexPath = basePath().add("indices").add(index.getId()); final BlobContainer indexMetaDataBlobContainer = blobStore().blobContainer(indexPath); indexMetaDataFormat.write(indexMetaData, indexMetaDataBlobContainer, snapshotId.getUUID()); } } catch (IOException ex) { throw new SnapshotCreationException(metadata.name(), snapshotId, ex); } }
/**
 * Cleans up the verification test blobs written under the seed-derived prefix.
 * No-op on read-only repositories since verification wrote nothing there.
 */
@Override
public void endVerification(String seed) {
    if (isReadOnly()) {
        return;
    }
    try {
        blobStore().delete(basePath().add(testBlobPrefix(seed)));
    } catch (IOException exp) {
        throw new RepositoryVerificationException(metadata.name(), "cannot delete test data at " + basePath(), exp);
    }
}
/**
 * Verifies that this node can access the repository. For read-only repositories
 * only reachability is checked; otherwise the node must be able to see the
 * master's test blob and write its own test blob under the seed prefix.
 *
 * @param seed verification seed chosen by the master
 * @param localNode the node performing the verification
 * @throws RepositoryVerificationException if the repository is not accessible from this node
 */
@Override
public void verify(String seed, DiscoveryNode localNode) {
    assertSnapshotOrGenericThread();
    if (isReadOnly()) {
        // Read-only: the best we can do is prove the repository root is readable.
        try {
            latestIndexBlobId();
        } catch (IOException e) {
            throw new RepositoryVerificationException(metadata.name(),
                "path " + basePath() + " is not accessible on node " + localNode, e);
        }
        return;
    }
    final BlobContainer testBlobContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed)));
    if (testBlobContainer.blobExists("master.dat") == false) {
        // The master wrote master.dat during startVerification; if we can't see it,
        // the store is either not shared with the master or unreadable from here.
        throw new RepositoryVerificationException(metadata.name(), "a file written by master to the store [" + blobStore() + "] cannot be accessed on the node [" + localNode + "]. " + "This might indicate that the store [" + blobStore() + "] is not shared between this node and the master node or " + "that permissions on the store don't allow reading files written by the master node");
    }
    try {
        final BytesArray bytes = new BytesArray(seed);
        try (InputStream stream = bytes.streamInput()) {
            // Prove this node can also write to the shared store.
            testBlobContainer.writeBlob("data-" + localNode.getId() + ".dat", stream, bytes.length(), true);
        }
    } catch (IOException exp) {
        throw new RepositoryVerificationException(metadata.name(), "store location [" + blobStore() + "] is not accessible on the node [" + localNode + "]", exp);
    }
}
@Override public String startVerification() { try { if (isReadOnly()) { // It's readonly - so there is not much we can do here to verify it apart from reading the blob store metadata latestIndexBlobId(); return "read-only"; } else { String seed = UUIDs.randomBase64UUID(); byte[] testBytes = Strings.toUTF8Bytes(seed); BlobContainer testContainer = blobStore().blobContainer(basePath().add(testBlobPrefix(seed))); String blobName = "master.dat"; BytesArray bytes = new BytesArray(testBytes); try (InputStream stream = bytes.streamInput()) { testContainer.writeBlobAtomic(blobName, stream, bytes.length(), true); } return seed; } } catch (IOException exp) { throw new RepositoryVerificationException(metadata.name(), "path " + basePath() + " is not accessible on master node", exp); } }