static void createCOWDataset(String commitTime, int numberOfPartitions)
    throws IOException, InitializationError, URISyntaxException, InterruptedException {
  Path path = new Path(hiveSyncConfig.basePath);
  FileUtils.deleteDirectory(new File(hiveSyncConfig.basePath));
  // Initialize a COPY_ON_WRITE table under the sync base path
  HoodieTableMetaClient.initTableType(configuration, hiveSyncConfig.basePath,
      HoodieTableType.COPY_ON_WRITE, hiveSyncConfig.tableName, HoodieAvroPayload.class.getName());

  boolean result = fileSystem.mkdirs(path);
  checkResult(result);
  DateTime dateTime = DateTime.now();
  HoodieCommitMetadata commitMetadata = createPartitions(numberOfPartitions, true, dateTime, commitTime);
  createdTablesSet.add(hiveSyncConfig.databaseName + "." + hiveSyncConfig.tableName);
  createCommitFile(commitMetadata, commitTime);
}
static void clear() throws IOException {
  fileSystem.delete(new Path(hiveSyncConfig.basePath), true);
  HoodieTableMetaClient.initTableType(configuration, hiveSyncConfig.basePath,
      HoodieTableType.COPY_ON_WRITE, hiveSyncConfig.tableName, HoodieAvroPayload.class.getName());

  // Drop every table registered during the test run, then recreate a clean database
  HoodieHiveClient client = new HoodieHiveClient(hiveSyncConfig, hiveServer.getHiveConf(), fileSystem);
  for (String tableName : createdTablesSet) {
    client.updateHiveSQL("drop table if exists " + tableName);
  }
  createdTablesSet.clear();
  client.updateHiveSQL("drop database if exists " + hiveSyncConfig.databaseName);
  client.updateHiveSQL("create database " + hiveSyncConfig.databaseName);
}
HoodieTableMetaClient.initTableType(jssc.hadoopConfiguration(), cfg.targetBasePath, cfg.storageType, cfg.targetTableName, "archived");
static void createMORDataset(String commitTime, String deltaCommitTime, int numberOfPartitions)
    throws IOException, InitializationError, URISyntaxException, InterruptedException {
  Path path = new Path(hiveSyncConfig.basePath);
  FileUtils.deleteDirectory(new File(hiveSyncConfig.basePath));
  HoodieTableMetaClient.initTableType(configuration, hiveSyncConfig.basePath,
      HoodieTableType.MERGE_ON_READ, hiveSyncConfig.tableName, HoodieAvroPayload.class.getName());

  boolean result = fileSystem.mkdirs(path);
  checkResult(result);
  DateTime dateTime = DateTime.now();
  HoodieCommitMetadata commitMetadata = createPartitions(numberOfPartitions, true, dateTime, commitTime);
  // Both the read-optimized and the realtime (_rt) table get registered
  createdTablesSet.add(hiveSyncConfig.databaseName + "." + hiveSyncConfig.tableName);
  createdTablesSet.add(hiveSyncConfig.databaseName + "." + hiveSyncConfig.tableName
      + HiveSyncTool.SUFFIX_REALTIME_TABLE);

  // Copy the write stats into a compaction commit
  HoodieCommitMetadata compactionMetadata = new HoodieCommitMetadata();
  commitMetadata.getPartitionToWriteStats()
      .forEach((key, value) -> value.forEach(stat -> compactionMetadata.addWriteStat(key, stat)));
  createCompactionCommitFile(compactionMetadata, commitTime);

  // Write a delta commit on top of the compaction commit
  HoodieCommitMetadata deltaMetadata = createLogFiles(commitMetadata.getPartitionToWriteStats(), true);
  createDeltaCommitFile(deltaMetadata, deltaCommitTime);
}
HoodieTableMetaClient.initTableType(HoodieCLI.conf, path, tableType, name, payloadClass);
if (!fs.exists(path)) {
  HoodieTableMetaClient.initTableType(jsc.hadoopConfiguration(), tablePath,
      HoodieTableType.valueOf(tableType), tableName, HoodieAvroPayload.class.getName());
}
HoodieTableMetaClient.initTableType(jsc.hadoopConfiguration(), dfsBasePath,
    HoodieTableType.valueOf(tableType), tableName, HoodieAvroPayload.class.getName());

HoodieTableMetaClient.initTableType(jsc.hadoopConfiguration(), tablePath,
    HoodieTableType.valueOf(tableType), tableName, HoodieAvroPayload.class.getName());
HoodieWriteConfig localConfig = HoodieWriteConfig.newBuilder().withPath(tablePath)
    .build(); // assumed completion: the original snippet was truncated after withPath(tablePath)
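All of the call sites above follow the same init-then-read pattern. Below is a minimal, self-contained sketch of that pattern, assuming the pre-Apache com.uber.hoodie package layout and the HoodieTableMetaClient(Configuration, String) constructor; InitTableExample, conf, basePath, and tableName are placeholder names, not identifiers from the snippets above.

import org.apache.hadoop.conf.Configuration;

import com.uber.hoodie.common.model.HoodieAvroPayload;
import com.uber.hoodie.common.model.HoodieTableType;
import com.uber.hoodie.common.table.HoodieTableMetaClient;

public class InitTableExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    String basePath = "/tmp/hoodie/sample_table"; // placeholder path
    String tableName = "sample_table";            // placeholder name

    // Writes .hoodie/hoodie.properties under basePath with the table type,
    // table name, and payload class, as at every call site listed above.
    HoodieTableMetaClient.initTableType(conf, basePath,
        HoodieTableType.COPY_ON_WRITE, tableName, HoodieAvroPayload.class.getName());

    // Loading the metadata back verifies the initialization took effect.
    HoodieTableMetaClient metaClient = new HoodieTableMetaClient(conf, basePath);
    System.out.println(metaClient.getTableType()); // COPY_ON_WRITE
  }
}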