/**
 * Returns the realtime view of the file system for this table, built over the
 * completed and compaction instants of the commits-and-compaction timeline.
 *
 * @return realtime file-system view for this table
 */
public TableFileSystemView.RealtimeView getRTFileSystemView() {
  final HoodieTableFileSystemView realtimeView = new HoodieTableFileSystemView(metaClient,
      metaClient.getCommitsAndCompactionTimeline().filterCompletedAndCompactionInstants());
  return realtimeView;
}
/**
 * Builds and returns the realtime file-system view for this table. The view is
 * backed by the commits-and-compaction timeline filtered down to completed and
 * compaction instants.
 *
 * @return realtime view of the file system for this table
 */
public TableFileSystemView.RealtimeView getRTFileSystemView() {
  return new HoodieTableFileSystemView(
      metaClient,
      metaClient.getCommitsAndCompactionTimeline().filterCompletedAndCompactionInstants());
}
/**
 * Renames delta files so that file-slices become consistent with the timeline as dictated by
 * Hoodie metadata. Use when compaction unschedule fails partially.
 *
 * <p>This operation MUST be executed with compactions and writer turned OFF.
 *
 * @param compactionInstant Compaction Instant to be repaired
 * @param parallelism parallelism used for validating and renaming
 * @param dryRun Dry Run Mode
 * @return results of the rename operations performed (empty when nothing needed repair)
 */
public List<RenameOpResult> repairCompaction(String compactionInstant, int parallelism, boolean dryRun)
    throws Exception {
  HoodieTableMetaClient metaClient = new HoodieTableMetaClient(jsc.hadoopConfiguration(), basePath);
  // Collect only the operations whose file-slices disagree with the compaction plan.
  List<ValidationOpResult> failedValidations =
      validateCompactionPlan(metaClient, compactionInstant, parallelism).stream()
          .filter(result -> !result.isSuccess())
          .collect(Collectors.toList());
  if (failedValidations.isEmpty()) {
    // Everything is already consistent; no renames required.
    return new ArrayList<>();
  }
  final HoodieTableFileSystemView fsView =
      new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline());
  // Gather the (old, new) log-file rename pairs needed to re-align each failed operation.
  List<Pair<HoodieLogFile, HoodieLogFile>> renameActions = new ArrayList<>();
  for (ValidationOpResult failure : failedValidations) {
    renameActions.addAll(getRenamingActionsToAlignWithCompactionOperation(
        metaClient, compactionInstant, failure.getOperation(), Optional.of(fsView)));
  }
  return runRenamingOps(metaClient, renameActions, parallelism, dryRun);
}
Optional<HoodieTableFileSystemView> fsViewOpt) { HoodieTableFileSystemView fileSystemView = fsViewOpt.isPresent() ? fsViewOpt.get() : new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline()); HoodieInstant lastInstant = metaClient.getCommitsAndCompactionTimeline().lastInstant().get(); FileSlice merged = fileSystemView.getLatestMergedFileSlicesBeforeOrOn(op.getPartitionPath(), lastInstant.getTimestamp())
/**
 * Validates all compaction operations in a compaction plan. Verifies that the file-slices on
 * storage are consistent with the corresponding compaction operations.
 *
 * @param metaClient Hoodie Table Meta Client
 * @param compactionInstant Compaction Instant whose plan is being validated
 * @param parallelism spark parallelism used while validating operations
 * @return one validation result per compaction operation; empty when the plan has no operations
 */
public List<ValidationOpResult> validateCompactionPlan(HoodieTableMetaClient metaClient,
    String compactionInstant, int parallelism) throws IOException {
  HoodieCompactionPlan compactionPlan = getCompactionPlan(metaClient, compactionInstant);
  HoodieTableFileSystemView fsView =
      new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline());
  if (compactionPlan.getOperations() == null) {
    return new ArrayList<>();
  }
  List<CompactionOperation> operations = compactionPlan.getOperations().stream()
      .map(CompactionOperation::convertFromAvroRecordInstance)
      .collect(Collectors.toList());
  // Validate each operation in parallel; spark closures cannot throw checked exceptions,
  // so IOExceptions are surfaced as HoodieIOException.
  return jsc.parallelize(operations, parallelism).map(operation -> {
    try {
      return validateCompactionOperation(metaClient, compactionInstant, operation, Optional.of(fsView));
    } catch (IOException ioe) {
      throw new HoodieIOException(ioe.getMessage(), ioe);
    }
  }).collect();
}
throws IOException { HoodieTableFileSystemView fileSystemView = fsViewOpt.isPresent() ? fsViewOpt.get() : new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline()); java.util.Optional<HoodieInstant> lastInstant = metaClient.getCommitsAndCompactionTimeline().lastInstant(); try { if (lastInstant.isPresent()) {
Optional<HoodieTableFileSystemView> fsViewOpt, boolean skipValidation) throws IOException { HoodieTableFileSystemView fsView = fsViewOpt.isPresent() ? fsViewOpt.get() : new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline()); HoodieCompactionPlan plan = getCompactionPlan(metaClient, compactionInstant); if (plan.getOperations() != null) {
List<Pair<HoodieLogFile, HoodieLogFile>> result = new ArrayList<>(); HoodieTableFileSystemView fileSystemView = fsViewOpt.isPresent() ? fsViewOpt.get() : new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline()); if (!skipValidation) { validateCompactionOperation(metaClient, compactionInstant, operation, Optional.of(fileSystemView)); HoodieInstant lastInstant = metaClient.getCommitsAndCompactionTimeline().lastInstant().get(); FileSlice merged = fileSystemView.getLatestMergedFileSlicesBeforeOrOn(operation.getPartitionPath(), lastInstant.getTimestamp())
HoodieTimeline commitTimeline = table.getMetaClient().getCommitsAndCompactionTimeline();
HoodieTimeline commitTimeline = table.getMetaClient().getCommitsAndCompactionTimeline();
new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline()); Set<HoodieLogFile> expLogFilesToBeRenamed = fsView.getLatestFileSlices(HoodieTestUtils.DEFAULT_PARTITION_PATHS[0]) .filter(fs -> fs.getBaseInstantTime().equals(compactionInstant)) new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline());
new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline()); Set<HoodieLogFile> expLogFilesToBeRenamed = fsView.getLatestFileSlices(HoodieTestUtils.DEFAULT_PARTITION_PATHS[0]) .filter(fs -> fs.getBaseInstantTime().equals(compactionInstant)) new HoodieTableFileSystemView(metaClient, metaClient.getCommitsAndCompactionTimeline());