/**
 * Closes the current output file, reporting its final stats first.
 *
 * <p>The listener is notified (record count, file size, path, bucket) while the
 * stream is still open — same ordering as before, since {@code getFileSize()}
 * is read prior to closing — and then all per-file state is reset so a new
 * file can be started.
 */
@Override
public void close() {
  if (stream == null) {
    return;
  }
  // Report the completed file before tearing the stream down.
  listener.recordsWritten(count, getFileSize(), path.toString(), null, partition.getBucketNumber());
  stream.close();
  // Reset per-file state for the next file.
  stream = null;
  fos = null;
  count = 0;
  index = 0;
  logger.debug("closing file");
}
// Closing the writer forces it to finalize the file and fire its completion
// callback; verify the listener was invoked exactly once, capturing every
// argument (record count, file size, path, metadata, partition) for the
// assertions that follow.
writer.close();
verify(outputEntryListener, times(1)).recordsWritten(recordWrittenCaptor.capture(), fileSizeCaptor.capture(), pathCaptor.capture(), metadataCaptor.capture(), partitionCaptor.capture());
// Verify the completion listener fired exactly once (capturing its arguments
// for later assertions), and that the byte-count listener fired once per
// input batch written.
verify(outputEntryListener, times(1)).recordsWritten(recordWrittenCaptor.capture(), fileSizeCaptor.capture(), pathCaptor.capture(), metadataCaptor.capture(), partitionCaptor.capture()); verify(writeStatsListener, times(batches.length)).bytesWritten(bytesWrittenCaptor.capture());
private void flushAndClose() throws IOException { if(parquetFileWriter == null){ return; } if (recordCount > 0) { long memSize = store.getBufferedSize(); parquetFileWriter.startBlock(recordCount); consumer.flush(); store.flush(); ColumnChunkPageWriteStoreExposer.flushPageStore(pageStore, parquetFileWriter); parquetFileWriter.endBlock(); long recordsWritten = recordCount; // we are writing one single block per file parquetFileWriter.end(extraMetaData); byte[] metadata = this.trackingConverter == null ? null : trackingConverter.getMetadata(); final long fileSize = parquetFileWriter.getPos(); listener.recordsWritten(recordsWritten, fileSize, path.toString(), metadata /** TODO: add parquet footer **/, partition.getBucketNumber()); parquetFileWriter = null; updateStats(memSize, recordCount); recordCount = 0; } if(store != null){ store.close(); } store = null; pageStore = null; index++; }
/**
 * Finalizes the output: flushes the generator, captures the final file size,
 * closes the stream, and reports totals to the listener.
 */
@Override public void close() throws Exception {
  try{
    if(gen == null){
      // create an empty file.
      startPartition(WritePartition.NONE);
    }
  }finally{
    // Ordered teardown via AutoCloseables.close: the anonymous closeable runs
    // FIRST — flushing the generator and reading the stream position as the
    // final file size — and only then is the stream itself closed. The size
    // must be read while the stream is still open.
    AutoCloseables.close( new AutoCloseable(){
      @Override public void close() throws IOException {
        if(gen != null){
          gen.flush();
          if (stream != null) {
            fileSize = stream.getPos();
          }
        }
      }}, stream );
    stream = null;
    if(gen != null){
      // Only report when a generator was actually created.
      listener.recordsWritten(recordCount, fileSize, fileName.toString(), null, partition.getBucketNumber());
    }
    // Reset per-file counters.
    recordCount = 0;
    fileSize = 0;
  }
}
// NOTE(review): this trailing brace closes the enclosing class.
}
private void closeCurrentFile() throws IOException { if (currentFileOutputStream != null) { // Save the footer starting offset final long footerStartOffset = currentFileOutputStream.getPos(); // write the footer ArrowFileFooter footer = footerBuilder.build(); footer.writeDelimitedTo(currentFileOutputStream); // write the foot offset currentFileOutputStream.writeLong(footerStartOffset); // write magic word bytes currentFileOutputStream.write(MAGIC_STRING.getBytes()); final long fileSize = currentFileOutputStream.getPos(); currentFileOutputStream.close(); currentFileOutputStream = null; ArrowFileMetadata lastFileMetadata = ArrowFileMetadata.newBuilder() .setFooter(footer) .setRecordCount(recordCount) .setPath(relativePath) .build(); outputEntryListener.recordsWritten(recordCount, fileSize, currentFile.toString(), lastFileMetadata.toByteArray(), null); } }