@Override
public Path getWorkingDirectory() {
  // Ask the wrapped FileSystem for its working directory, then re-wrap the
  // result in the Hoodie path scheme before returning it to the caller.
  Path underlying = fileSystem.getWorkingDirectory();
  return convertToHoodiePath(underlying);
}
@Override
public Path getHomeDirectory() {
  // Delegate to the wrapped FileSystem and translate the returned home
  // directory into the Hoodie path scheme.
  Path home = fileSystem.getHomeDirectory();
  return convertToHoodiePath(home);
}
@Override
public Path makeQualified(Path path) {
  // Translate to the default scheme, let the wrapped FileSystem qualify the
  // path, then translate the qualified result back to the Hoodie scheme.
  Path defaultPath = convertToDefaultPath(path);
  Path qualified = fileSystem.makeQualified(defaultPath);
  return convertToHoodiePath(qualified);
}
@Override
public Path getWorkingDirectory() {
  // Working directory comes from the underlying FileSystem; convert it to
  // the Hoodie scheme on the way out.
  return convertToHoodiePath(fileSystem.getWorkingDirectory());
}
@Override
public Path getHomeDirectory() {
  // Home directory comes from the underlying FileSystem; convert it to the
  // Hoodie scheme on the way out.
  return convertToHoodiePath(fileSystem.getHomeDirectory());
}
@Override
public Path makeQualified(Path path) {
  // Round-trip: Hoodie scheme -> default scheme -> qualify -> Hoodie scheme.
  Path qualified = fileSystem.makeQualified(convertToDefaultPath(path));
  return convertToHoodiePath(qualified);
}
@Override
public Path resolvePath(Path p) throws IOException {
  // Resolve via the wrapped FileSystem using the default scheme, then wrap
  // the resolved path back into the Hoodie scheme.
  Path defaultPath = convertToDefaultPath(p);
  return convertToHoodiePath(fileSystem.resolvePath(defaultPath));
}
@Override
public Path createSnapshot(Path path, String snapshotName) throws IOException {
  // Create the snapshot through the wrapped FileSystem (default scheme) and
  // return the snapshot path converted to the Hoodie scheme.
  Path defaultPath = convertToDefaultPath(path);
  Path snapshot = fileSystem.createSnapshot(defaultPath, snapshotName);
  return convertToHoodiePath(snapshot);
}
@Override
public Path getLinkTarget(Path f) throws IOException {
  // Look up the link target on the wrapped FileSystem, converting the input
  // to the default scheme and the result back to the Hoodie scheme.
  Path defaultPath = convertToDefaultPath(f);
  return convertToHoodiePath(fileSystem.getLinkTarget(defaultPath));
}
@Override
public Path getLinkTarget(Path f) throws IOException {
  // Round-trip: Hoodie scheme -> default scheme -> link target -> Hoodie scheme.
  Path target = fileSystem.getLinkTarget(convertToDefaultPath(f));
  return convertToHoodiePath(target);
}
@Override
public Path resolvePath(Path p) throws IOException {
  // Round-trip: Hoodie scheme -> default scheme -> resolve -> Hoodie scheme.
  Path resolved = fileSystem.resolvePath(convertToDefaultPath(p));
  return convertToHoodiePath(resolved);
}
@Override
public Path createSnapshot(Path path, String snapshotName) throws IOException {
  // Delegate snapshot creation to the wrapped FileSystem; only the path is
  // converted between schemes, the snapshot name passes through unchanged.
  Path snapshot = fileSystem.createSnapshot(convertToDefaultPath(path), snapshotName);
  return convertToHoodiePath(snapshot);
}
@Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException {
  // Convert both the remote target and the local temp path to the default
  // scheme for the wrapped FileSystem, then wrap the returned path back
  // into the Hoodie scheme.
  Path target = convertToDefaultPath(fsOutputFile);
  Path tmp = convertToDefaultPath(tmpLocalFile);
  return convertToHoodiePath(fileSystem.startLocalOutput(target, tmp));
}
@Override
public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException {
  // Both arguments are translated to the default scheme before delegation;
  // the wrapped FileSystem's answer is translated back to the Hoodie scheme.
  Path result = fileSystem.startLocalOutput(
      convertToDefaultPath(fsOutputFile), convertToDefaultPath(tmpLocalFile));
  return convertToHoodiePath(result);
}
/**
 * Builds a Parquet writer whose output path is wrapped in the Hoodie file-system
 * scheme so that bytes written can be tracked by the {@code HoodieWrapperFileSystem}.
 *
 * @param commitTime    commit instant this writer's records belong to
 * @param file          destination path for the Parquet file (converted to the Hoodie scheme)
 * @param parquetConfig block/page sizes, compression codec and ratio, write support, Hadoop conf
 * @param schema        Avro schema of the records to be written
 * @throws IOException if the underlying Parquet writer or file system cannot be initialized
 */
public HoodieParquetWriter(String commitTime, Path file, HoodieParquetConfig parquetConfig,
    Schema schema) throws IOException {
  // super(...) must be the first statement, so all arguments are computed inline.
  super(HoodieWrapperFileSystem.convertToHoodiePath(file, parquetConfig.getHadoopConf()),
      ParquetFileWriter.Mode.CREATE, parquetConfig.getWriteSupport(),
      parquetConfig.getCompressionCodecName(), parquetConfig.getBlockSize(),
      parquetConfig.getPageSize(),
      // NOTE(review): the dictionary page size is deliberately passed the same value as the
      // data page size here — confirm this matches the intended ParquetWriter configuration.
      parquetConfig.getPageSize(),
      ParquetWriter.DEFAULT_IS_DICTIONARY_ENABLED, ParquetWriter.DEFAULT_IS_VALIDATING_ENABLED,
      ParquetWriter.DEFAULT_WRITER_VERSION,
      registerFileSystem(file, parquetConfig.getHadoopConf()));
  // Keep the Hoodie-scheme path and its wrapper file system so written-byte
  // counts can be queried later.
  this.file = HoodieWrapperFileSystem.convertToHoodiePath(file, parquetConfig.getHadoopConf());
  this.fs = (HoodieWrapperFileSystem) this.file
      .getFileSystem(registerFileSystem(file, parquetConfig.getHadoopConf()));
  // We cannot accurately measure the snappy compressed output file size. We are choosing a
  // conservative 10%
  // TODO - compute this compression ratio dynamically by looking at the bytes written to the
  // stream and the actual file size reported by HDFS
  this.maxFileSize = parquetConfig.getMaxFileSize() + Math
      .round(parquetConfig.getMaxFileSize() * parquetConfig.getCompressionRatio());
  this.writeSupport = parquetConfig.getWriteSupport();
  this.commitTime = commitTime;
  this.schema = schema;
}
/**
 * Constructs a Hoodie-aware Parquet writer. The target path is converted to the
 * Hoodie scheme and the file system is registered so the wrapper can observe writes.
 *
 * @param commitTime    commit instant associated with the records being written
 * @param file          destination Parquet file path
 * @param parquetConfig Parquet sizing, compression, write-support and Hadoop configuration
 * @param schema        Avro schema for the records
 * @throws IOException if the writer or the wrapper file system cannot be created
 */
public HoodieParquetWriter(String commitTime, Path file, HoodieParquetConfig parquetConfig,
    Schema schema) throws IOException {
  // super(...) must come first; every argument is therefore computed inline.
  super(HoodieWrapperFileSystem.convertToHoodiePath(file, parquetConfig.getHadoopConf()),
      ParquetFileWriter.Mode.CREATE, parquetConfig.getWriteSupport(),
      parquetConfig.getCompressionCodecName(), parquetConfig.getBlockSize(),
      parquetConfig.getPageSize(),
      // NOTE(review): page size is reused as the dictionary page size — verify intentional.
      parquetConfig.getPageSize(),
      ParquetWriter.DEFAULT_IS_DICTIONARY_ENABLED, ParquetWriter.DEFAULT_IS_VALIDATING_ENABLED,
      ParquetWriter.DEFAULT_WRITER_VERSION,
      registerFileSystem(file, parquetConfig.getHadoopConf()));
  // Cache the Hoodie-scheme path and its HoodieWrapperFileSystem for later
  // size/byte-count queries.
  this.file = HoodieWrapperFileSystem.convertToHoodiePath(file, parquetConfig.getHadoopConf());
  this.fs = (HoodieWrapperFileSystem) this.file
      .getFileSystem(registerFileSystem(file, parquetConfig.getHadoopConf()));
  // We cannot accurately measure the snappy compressed output file size. We are choosing a
  // conservative 10%
  // TODO - compute this compression ratio dynamically by looking at the bytes written to the
  // stream and the actual file size reported by HDFS
  this.maxFileSize = parquetConfig.getMaxFileSize() + Math
      .round(parquetConfig.getMaxFileSize() * parquetConfig.getCompressionRatio());
  this.writeSupport = parquetConfig.getWriteSupport();
  this.commitTime = commitTime;
  this.schema = schema;
}