/**
 * Loads a {@link TypedProperties} instance from the supplied stream.
 *
 * <p>Note: the caller owns the stream and is responsible for closing it.
 *
 * @param in stream to read properties from (standard {@code Properties} format)
 * @return the populated properties object
 * @throws IOException if reading from the stream fails
 */
public static TypedProperties readConfig(InputStream in) throws IOException {
  TypedProperties props = new TypedProperties();
  props.load(in);
  return props;
}
}
/**
 * Lists every partition path under the base path.
 *
 * @param fs                     file system handle used for listing
 * @param basePathStr            root path of the table
 * @param assumeDatePartitioning when true, assumes a fixed yyyy/mm/dd layout and simply walks
 *                               three directory levels; otherwise scans for partition metafiles
 * @return all discovered partition paths
 * @throws IOException if listing the file system fails
 */
public static List<String> getAllPartitionPaths(FileSystem fs, String basePathStr,
    boolean assumeDatePartitioning) throws IOException {
  return assumeDatePartitioning
      ? getAllFoldersThreeLevelsDown(fs, basePathStr)
      : getAllFoldersWithPartitionMetaFile(fs, basePathStr);
}
/**
 * Estimates the size in bytes of the given payload, as reported by the supplied
 * {@link SizeEstimator}. Used to approximate the payload's footprint either in
 * memory or when written to disk.
 *
 * @param value              payload whose size is being estimated
 * @param valueSizeEstimator estimator that computes the size for the payload type
 * @param <R>                payload type
 * @return estimated size of the payload in bytes
 * @throws IOException if the estimator needs to serialize the payload and fails
 */
public static <R> long computePayloadSize(R value, SizeEstimator<R> valueSizeEstimator)
    throws IOException {
  return valueSizeEstimator.sizeEstimate(value);
}
/**
 * Produces test records written with the evolved schema.
 *
 * @param from  index of the first record to generate
 * @param limit number of records to generate
 * @return generated Avro records conforming to the evolved schema
 * @throws IOException        if reading a schema resource fails
 * @throws URISyntaxException if a schema resource location is malformed
 */
public static List<IndexedRecord> generateEvolvedTestRecords(int from, int limit)
    throws IOException, URISyntaxException {
  Schema writerSchema = getSimpleSchema();
  Schema evolvedSchema = getEvolvedSchema();
  return toRecords(writerSchema, evolvedSchema, from, limit);
}
/**
 * Decodes a serialized compaction plan back into its Avro object form.
 *
 * @param bytes serialized bytes of a {@link HoodieCompactionPlan}
 * @return the deserialized compaction plan
 * @throws IOException if the bytes cannot be deserialized
 */
public static HoodieCompactionPlan deserializeCompactionPlan(byte[] bytes) throws IOException {
  return deserializeAvroMetadata(bytes, HoodieCompactionPlan.class);
}
/**
 * Builds the full path of a data file under the given base/partition path.
 *
 * <p>NOTE: the {@code final} modifier the original carried was dropped — {@code final} is
 * meaningless on a {@code static} method (it cannot be overridden anyway). The declared
 * {@code throws IOException} is retained for source compatibility with existing callers that
 * catch it, even though nothing in this body throws it.
 *
 * @param basePath      table base path
 * @param partitionPath partition path relative to the base path
 * @param commitTime    commit time embedded in the data file name
 * @param fileID        file id embedded in the data file name
 * @return the assembled data file path
 * @throws IOException never thrown by this implementation; kept for caller compatibility
 */
public static String getDataFilePath(String basePath, String partitionPath, String commitTime,
    String fileID) throws IOException {
  return basePath + "/" + partitionPath + "/"
      + FSUtils.makeDataFileName(commitTime, DEFAULT_TASK_PARTITIONID, fileID);
}
/**
 * Returns the value of the given property as a raw string.
 *
 * <p>Validates the key first via {@code checkKey}, which is expected to reject
 * missing properties before the lookup happens.
 *
 * @param property property key to look up
 * @return the property's string value
 */
public String getString(String property) {
  checkKey(property);
  String value = getProperty(property);
  return value;
}
/**
 * Extracts the file id encoded in this log file's path.
 *
 * @return the file id parsed from the log path
 */
public String getFileId() {
  String fileId = FSUtils.getFileIdFromLogPath(path);
  return fileId;
}
/**
 * Extracts the version number encoded in this log file's path.
 *
 * @return the log file version parsed from the path
 */
public int getLogVersion() {
  int version = FSUtils.getFileVersionFromLog(path);
  return version;
}
/**
 * Serializes clean metadata to its Avro byte representation.
 *
 * @param metadata clean metadata to serialize
 * @return serialized bytes, if serialization produced any
 * @throws IOException if serialization fails
 */
public static Optional<byte[]> serializeCleanMetadata(HoodieCleanMetadata metadata)
    throws IOException {
  return serializeAvroMetadata(metadata, HoodieCleanMetadata.class);
}
/**
 * Appends the inflight extension to the given file name.
 *
 * @param fileName base file name
 * @return the file name marked as inflight
 */
static String makeFileNameAsInflight(String fileName) {
  String inflightName = StringUtils.join(fileName, HoodieTimeline.INFLIGHT_EXTENSION);
  return inflightName;
}
}
/**
 * Writes a file entry (value plus its bookkeeping metadata) to disk.
 *
 * <p>Each entry is laid out on disk in the following sequence:
 * {@code |crc|timestamp|sizeOfKey|sizeOfValue|key|value|}
 *
 * @param outputStream destination stream, which tracks the number of bytes written
 * @param fileEntry    entry to persist
 * @return number of bytes written for this entry
 * @throws IOException if writing to the stream fails
 */
public static long spillToDisk(SizeAwareDataOutputStream outputStream,
    DiskBasedMap.FileEntry fileEntry) throws IOException {
  long bytesWritten = spill(outputStream, fileEntry);
  return bytesWritten;
}
/**
 * Decodes serialized clean metadata back into its Avro object form.
 *
 * @param bytes serialized bytes of a {@link HoodieCleanMetadata}
 * @return the deserialized clean metadata
 * @throws IOException if the bytes cannot be deserialized
 */
public static HoodieCleanMetadata deserializeHoodieCleanMetadata(byte[] bytes)
    throws IOException {
  return deserializeAvroMetadata(bytes, HoodieCleanMetadata.class);
}
/**
 * Reads a configuration from the given stream into a {@link TypedProperties}.
 *
 * <p>The stream is not closed here; closing it is left to the caller.
 *
 * @param in input stream in standard {@code Properties} format
 * @return properties loaded from the stream
 * @throws IOException if the stream cannot be read
 */
public static TypedProperties readConfig(InputStream in) throws IOException {
  TypedProperties config = new TypedProperties();
  config.load(in);
  return config;
}
}
/**
 * Serializes a compaction plan to its Avro byte representation.
 *
 * @param compactionWorkload compaction plan to serialize
 * @return serialized bytes, if serialization produced any
 * @throws IOException if serialization fails
 */
public static Optional<byte[]> serializeCompactionPlan(HoodieCompactionPlan compactionWorkload)
    throws IOException {
  return serializeAvroMetadata(compactionWorkload, HoodieCompactionPlan.class);
}
/**
 * Builds the commit file name for the given commit time.
 *
 * @param commitTime commit instant time
 * @return the commit time with the commit extension appended
 */
static String makeCommitFileName(String commitTime) {
  String commitFileName = StringUtils.join(commitTime, HoodieTimeline.COMMIT_EXTENSION);
  return commitFileName;
}
/**
 * Decodes serialized savepoint metadata back into its Avro object form.
 *
 * @param bytes serialized bytes of a {@link HoodieSavepointMetadata}
 * @return the deserialized savepoint metadata
 * @throws IOException if the bytes cannot be deserialized
 */
public static HoodieSavepointMetadata deserializeHoodieSavepointMetadata(byte[] bytes)
    throws IOException {
  return deserializeAvroMetadata(bytes, HoodieSavepointMetadata.class);
}
/**
 * Serializes savepoint metadata to its Avro byte representation.
 *
 * @param metadata savepoint metadata to serialize
 * @return serialized bytes, if serialization produced any
 * @throws IOException if serialization fails
 */
public static Optional<byte[]> serializeSavepointMetadata(HoodieSavepointMetadata metadata)
    throws IOException {
  return serializeAvroMetadata(metadata, HoodieSavepointMetadata.class);
}
/**
 * Builds the inflight cleaner file name for the given instant.
 *
 * @param instant instant time
 * @return the instant with the inflight-clean extension appended
 */
static String makeInflightCleanerFileName(String instant) {
  String inflightCleanName = StringUtils.join(instant, HoodieTimeline.INFLIGHT_CLEAN_EXTENSION);
  return inflightCleanName;
}
/**
 * Serializes rollback metadata to its Avro byte representation.
 *
 * @param rollbackMetadata rollback metadata to serialize
 * @return serialized bytes, if serialization produced any
 * @throws IOException if serialization fails
 */
public static Optional<byte[]> serializeRollbackMetadata(HoodieRollbackMetadata rollbackMetadata)
    throws IOException {
  return serializeAvroMetadata(rollbackMetadata, HoodieRollbackMetadata.class);
}