/**
 * Closes the delegate Parquet writer, then releases the parent's resources.
 * The finally block guarantees {@code super.close()} runs even when the
 * delegate's close throws.
 */
@Override
public void close() throws IOException {
  try {
    this.writer.close();
  } finally {
    super.close();
  }
}
}
/**
 * Forwards a single {@link Group} record to the delegate writer and, on
 * success, bumps the written-record counter.
 */
@Override
public void write(Group record) throws IOException {
  this.writer.write(record);
  this.count.incrementAndGet();
}
/**
 * Builds a {@link ParquetWriter} of {@link Group} records for the given staging file.
 *
 * @param blockSize   row-group (block) size in bytes for the output file
 * @param stagingFile staging file path, resolved against the configured file-system root URI
 * @return a configured {@link ParquetWriter} targeting the absolute staging path
 * @throws IOException if the underlying Parquet writer cannot be created
 */
public ParquetWriter<Group> getWriter(int blockSize, Path stagingFile) throws IOException {
  State state = this.destination.getProperties();
  int pageSize = state.getPropAsInt(getProperty(WRITER_PARQUET_PAGE_SIZE), DEFAULT_PAGE_SIZE);
  // Fix: the dictionary page size previously defaulted to DEFAULT_BLOCK_SIZE (a row-group
  // size, typically ~128MB). Parquet's default dictionary page size matches the data page
  // size, so DEFAULT_PAGE_SIZE is the correct fallback here.
  int dictPageSize = state.getPropAsInt(getProperty(WRITER_PARQUET_DICTIONARY_PAGE_SIZE), DEFAULT_PAGE_SIZE);
  boolean enableDictionary =
      state.getPropAsBoolean(getProperty(WRITER_PARQUET_DICTIONARY), DEFAULT_IS_DICTIONARY_ENABLED);
  boolean validate = state.getPropAsBoolean(getProperty(WRITER_PARQUET_VALIDATE), DEFAULT_IS_VALIDATING_ENABLED);
  String rootURI = state.getProp(WRITER_FILE_SYSTEM_URI, LOCAL_FS_URI);
  Path absoluteStagingFile = new Path(rootURI, stagingFile);
  CompressionCodecName codec = getCodecFromConfig();
  GroupWriteSupport support = new GroupWriteSupport();
  Configuration conf = new Configuration();
  // GroupWriteSupport reads the schema out of the Configuration during init.
  GroupWriteSupport.setSchema(this.schema, conf);
  ParquetProperties.WriterVersion writerVersion = getWriterVersion();
  return new ParquetWriter<>(absoluteStagingFile, support, codec, blockSize, pageSize, dictPageSize,
      enableDictionary, validate, writerVersion, conf);
}
@Override
<T> ParquetWriter<T> newInstance(
    Path path, WriteSupport<T> writeSupport, ParquetFileOutput.Options options, Configuration configuration)
    throws IOException {
  // Minimal variant: delegates to the two-argument constructor, so every option
  // value and the Hadoop configuration intentionally fall back to Parquet defaults.
  return new ParquetWriter<T>(path, writeSupport);
}
},
@Override
<T> ParquetWriter<T> newInstance(
    Path path, WriteSupport<T> writeSupport, ParquetFileOutput.Options options, Configuration configuration)
    throws IOException {
  // Eight-argument variant: writer version and Hadoop configuration are left
  // to Parquet defaults; everything else comes from the supplied options.
  CompressionCodecName codec = options.getCompressionCodecName();
  int blockSize = options.getBlockSize();
  int dataPageSize = options.getDataPageSize();
  int dictionaryPageSize = options.getDictionaryPageSize();
  return new ParquetWriter<T>(
      path,
      writeSupport,
      codec,
      blockSize,
      dataPageSize,
      dictionaryPageSize,
      options.isEnableDictionary(),
      options.isEnableValidation());
}
},
/**
 * Closes the lazily-created writer, if one was ever opened.
 */
@Override
public void close() throws IOException {
  if (currentWriter == null) {
    // Nothing was written, so no writer was created — nothing to release.
    return;
  }
  currentWriter.close();
}
/**
 * Writes one {@link Group} record via the wrapped writer, then increments
 * the record count.
 */
@Override
public void write(Group record) throws IOException {
  this.writer.write(record);
  this.count.incrementAndGet();
}
@Override
<T> ParquetWriter<T> newInstance(
    Path path, WriteSupport<T> writeSupport, ParquetFileOutput.Options options, Configuration configuration)
    throws IOException {
  // Fully-specified variant: forwards every option plus the explicit writer
  // version and Hadoop configuration to the ten-argument constructor.
  CompressionCodecName codec = options.getCompressionCodecName();
  int blockSize = options.getBlockSize();
  int dataPageSize = options.getDataPageSize();
  int dictionaryPageSize = options.getDictionaryPageSize();
  return new ParquetWriter<T>(
      path,
      writeSupport,
      codec,
      blockSize,
      dataPageSize,
      dictionaryPageSize,
      options.isEnableDictionary(),
      options.isEnableValidation(),
      options.getWriterVersion(),
      configuration);
}
},
/**
 * Closes the delegate writer first; {@code super.close()} is placed in a
 * finally block so the parent is always cleaned up, even on failure.
 */
@Override
public void close() throws IOException {
  try {
    this.writer.close();
  } finally {
    super.close();
  }
}
}
@Override public void write(T model) throws IOException { ParquetWriter<T> writer = prepareWriter(); writer.write(model); // not sure counter.add(1); }
@Override
<T> ParquetWriter<T> newInstance(
    Path path, WriteSupport<T> writeSupport, ParquetFileOutput.Options options, Configuration configuration)
    throws IOException {
  // Nine-argument variant: passes the explicit writer version, but leaves the
  // Hadoop configuration to Parquet's default.
  CompressionCodecName codec = options.getCompressionCodecName();
  int blockSize = options.getBlockSize();
  int dataPageSize = options.getDataPageSize();
  int dictionaryPageSize = options.getDictionaryPageSize();
  return new ParquetWriter<T>(
      path,
      writeSupport,
      codec,
      blockSize,
      dataPageSize,
      dictionaryPageSize,
      options.isEnableDictionary(),
      options.isEnableValidation(),
      options.getWriterVersion());
}
},
/**
 * Builds a {@link ParquetWriter} of {@link Group} records for the given staging file.
 *
 * @param blockSize   row-group (block) size in bytes for the output file
 * @param stagingFile staging file path, resolved against the configured file-system root URI
 * @return a configured {@link ParquetWriter} targeting the absolute staging path
 * @throws IOException if the underlying Parquet writer cannot be created
 */
public ParquetWriter<Group> getWriter(int blockSize, Path stagingFile) throws IOException {
  State state = this.destination.getProperties();
  int pageSize = state.getPropAsInt(getProperty(WRITER_PARQUET_PAGE_SIZE), DEFAULT_PAGE_SIZE);
  // Fix: the dictionary page size previously defaulted to DEFAULT_BLOCK_SIZE (a row-group
  // size, typically ~128MB). Parquet's default dictionary page size matches the data page
  // size, so DEFAULT_PAGE_SIZE is the correct fallback here.
  int dictPageSize = state.getPropAsInt(getProperty(WRITER_PARQUET_DICTIONARY_PAGE_SIZE), DEFAULT_PAGE_SIZE);
  boolean enableDictionary =
      state.getPropAsBoolean(getProperty(WRITER_PARQUET_DICTIONARY), DEFAULT_IS_DICTIONARY_ENABLED);
  boolean validate = state.getPropAsBoolean(getProperty(WRITER_PARQUET_VALIDATE), DEFAULT_IS_VALIDATING_ENABLED);
  String rootURI = state.getProp(WRITER_FILE_SYSTEM_URI, LOCAL_FS_URI);
  Path absoluteStagingFile = new Path(rootURI, stagingFile);
  CompressionCodecName codec = getCodecFromConfig();
  GroupWriteSupport support = new GroupWriteSupport();
  Configuration conf = new Configuration();
  // GroupWriteSupport reads the schema out of the Configuration during init.
  GroupWriteSupport.setSchema(this.schema, conf);
  ParquetProperties.WriterVersion writerVersion = getWriterVersion();
  return new ParquetWriter<>(absoluteStagingFile, support, codec, blockSize, pageSize, dictPageSize,
      enableDictionary, validate, writerVersion, conf);
}