/**
 * Combine with the current stored value: this payload ignores the existing value
 * and always resolves to its own insert value.
 */
@Override
public Optional<IndexedRecord> combineAndGetUpdateValue(IndexedRecord currentValue, Schema schema) throws IOException {
  // currentValue is intentionally unused — the insert value wins unconditionally.
  Optional<IndexedRecord> resolved = getInsertValue(schema);
  return resolved;
}
/** Human-readable representation: the class name wrapping the underlying file path. */
@Override
public String toString() {
  return new StringBuilder("HoodieArchivedLogFile {")
      .append(super.getPath())
      .append('}')
      .toString();
}
}
/**
 * Thrown when a record that should exist in a partition cannot be located.
 * The message carries the record key, partition path, and current location for debugging.
 */
public HoodieRecordMissingException(HoodieRecord record) { super( "Record " + record.getRecordKey() + " with partition path " + record.getPartitionPath() + " in current location " + record.getCurrentLocation() + " is not found in the partition"); } }
@Override protected void processNextRecord(HoodieRecord<? extends HoodieRecordPayload> hoodieRecord) throws IOException { String key = hoodieRecord.getRecordKey(); if (records.containsKey(key)) { // Merge and store the merged record. The HoodieRecordPayload implementation is free to decide what should be // done when a delete (empty payload) is encountered before or after an insert/update. HoodieRecordPayload combinedValue = records.get(key).getData().preCombine(hoodieRecord.getData()); records.put(key, new HoodieRecord<>(new HoodieKey(key, hoodieRecord.getPartitionPath()), combinedValue)); } else { // Put the record as is records.put(key, hoodieRecord); } }
/**
 * Wraps an Avro record into a HoodieRecord keyed by the given record key and partition path.
 */
private static HoodieRecord convertToHoodieRecords(IndexedRecord iRecord, String key, String partitionPath) {
  HoodieKey hoodieKey = new HoodieKey(key, partitionPath);
  // assumes iRecord is always a GenericRecord here — cast mirrors the original
  HoodieAvroPayload payload = new HoodieAvroPayload(Optional.of((GenericRecord) iRecord));
  return new HoodieRecord<>(hoodieKey, payload);
}
/**
 * Eagerly computes the Avro insert value for the record; any failure is captured
 * in the optional exception field instead of being thrown, for later inspection.
 */
public HoodieInsertValueGenResult(T record, Schema schema) {
  this.record = record;
  try {
    this.insertValue = record.getData().getInsertValue(schema);
  } catch (Exception ex) {
    // Capture rather than rethrow — callers check the optional exception.
    this.exception = Optional.of(ex);
  }
}
}
/**
 * Add a new datafile into the file group, creating the file slice for its
 * commit time on first sight.
 */
public void addDataFile(HoodieDataFile dataFile) {
  fileSlices
      .computeIfAbsent(dataFile.getCommitTime(), commitTime -> new FileSlice(commitTime, id))
      .setDataFile(dataFile);
}
/**
 * Add a new log file into the group, creating the file slice for its
 * base commit time on first sight.
 */
public void addLogFile(HoodieLogFile logFile) {
  fileSlices
      .computeIfAbsent(logFile.getBaseCommitTime(), baseCommit -> new FileSlice(baseCommit, id))
      .addLogFile(logFile);
}
/**
 * Ensure records have location field set.
 *
 * @param taggedRecords Tagged Records
 * @param commitTime Commit Timestamp
 */
void checkTaggedRecords(List<HoodieRecord> taggedRecords, String commitTime) {
  for (HoodieRecord rec : taggedRecords) {
    assertTrue("Record " + rec + " found with no location.", rec.isCurrentLocationKnown());
    // JUnit's assertEquals is (message, expected, actual); the original passed the actual
    // commit time in the expected slot, producing misleading failure messages.
    assertEquals("All records should have commit time " + commitTime + ", since updates were made",
        commitTime, rec.getCurrentLocation().getCommitTime());
  }
}
// Returns the record key portion of the HoodieKey; key must already be populated
// (assert fires only with -ea enabled).
public String getRecordKey() { assert key != null; return key.getRecordKey(); } }
/**
 * Initializes a COPY_ON_WRITE test table at the given base path using the
 * default Hadoop configuration.
 */
public static HoodieTableMetaClient init(String basePath) throws IOException {
  Configuration defaultConf = getDefaultHadoopConf();
  return initTableType(defaultConf, basePath, HoodieTableType.COPY_ON_WRITE);
}
/**
 * Creates inflight clean files for the given commit times using the default Hadoop conf.
 * Dropped the redundant {@code final} modifier: static methods cannot be overridden,
 * so {@code final} is a no-op flagged by checkstyle's RedundantModifier.
 */
public static void createInflightCleanFiles(String basePath, String... commitTimes) throws IOException {
  createInflightCleanFiles(basePath, HoodieTestUtils.getDefaultHadoopConf(), commitTimes);
}
/**
 * Sums the total scan time across all write stats of every partition.
 *
 * <p>Uses a primitive {@code long} accumulator: the original boxed {@code Long}
 * re-boxed on every {@code +=}. Entries without runtime stats are skipped.
 *
 * @return total scan time (0 when no runtime stats are present); boxed once on return
 */
public Long getTotalScanTime() {
  long totalScanTime = 0L;
  for (List<HoodieWriteStat> partitionStats : partitionToWriteStats.values()) {
    for (HoodieWriteStat writeStat : partitionStats) {
      if (writeStat.getRuntimeStats() != null) {
        totalScanTime += writeStat.getRuntimeStats().getTotalScanTime();
      }
    }
  }
  return totalScanTime;
}
/**
 * Sums the total create time across all write stats of every partition.
 *
 * <p>Uses a primitive {@code long} accumulator: the original boxed {@code Long}
 * re-boxed on every {@code +=}. Entries without runtime stats are skipped.
 *
 * @return total create time (0 when no runtime stats are present); boxed once on return
 */
public Long getTotalCreateTime() {
  long totalCreateTime = 0L;
  for (List<HoodieWriteStat> partitionStats : partitionToWriteStats.values()) {
    for (HoodieWriteStat writeStat : partitionStats) {
      if (writeStat.getRuntimeStats() != null) {
        totalCreateTime += writeStat.getRuntimeStats().getTotalCreateTime();
      }
    }
  }
  return totalCreateTime;
}
/**
 * Build Compaction operation payload from its Avro form, for use in Spark executors.
 *
 * @param hc HoodieCompactionOperation (Avro representation)
 * @return the converted CompactionOperation
 */
public static CompactionOperation buildCompactionOperation(HoodieCompactionOperation hc) {
  CompactionOperation operation = CompactionOperation.convertFromAvroRecordInstance(hc);
  return operation;
}
// Returns the partition path portion of the HoodieKey; key must already be populated
// (assert fires only with -ea enabled).
public String getPartitionPath() { assert key != null; return key.getPartitionPath(); }
/**
 * Records a rolling stat for a file under the given partition, creating the
 * per-partition stats map on first use.
 */
public void addRollingStat(String partitionPath, HoodieRollingStat stat) {
  partitionToRollingStats
      .computeIfAbsent(partitionPath, path -> new RollingStatsHashMap<>())
      .put(stat.getFileId(), stat);
}
/**
 * Initializes a test table at the given base path with the supplied Hadoop
 * configuration, defaulting the table type to COPY_ON_WRITE.
 */
public static HoodieTableMetaClient init(Configuration hadoopConf, String basePath) throws IOException {
  HoodieTableType defaultType = HoodieTableType.COPY_ON_WRITE;
  return initTableType(hadoopConf, basePath, defaultType);
}
/**
 * Utility to simulate a commit touching files in a partition: creates one data
 * file per file-id at the given commit time.
 *
 * @param files List of file-Ids to be touched
 * @param partitionPath Partition
 * @param commitTime Commit Timestamp
 * @throws IOException in case of error
 */
void updateAllFilesInPartition(List<String> files, String partitionPath, String commitTime) throws IOException {
  for (String touchedFileId : files) {
    HoodieTestUtils.createDataFile(basePath, partitionPath, commitTime, touchedFileId);
  }
}
/**
 * Combine with the current stored value: this payload disregards the existing
 * value and always yields its own insert value.
 */
@Override
public Optional<IndexedRecord> combineAndGetUpdateValue(IndexedRecord currentValue, Schema schema) throws IOException {
  // currentValue is deliberately ignored — insert semantics apply on update too.
  Optional<IndexedRecord> value = getInsertValue(schema);
  return value;
}