/**
 * Creates a data sink that writes ORC output to the given stream.
 *
 * @param outputStream destination stream; must not be null
 */
public OutputStreamOrcDataSink(OutputStream outputStream)
{
    requireNonNull(outputStream, "outputStream is null");
    this.output = new OutputStreamSliceOutput(outputStream);
}
/**
 * Returns the number of bytes written so far.
 */
@Override
public long size()
{
    // delegate to the wrapped slice output's 64-bit counter
    return output.longSize();
}
/**
 * Flushes and closes the underlying output stream.
 */
@Override
public void close()
        throws IOException
{
    output.close();
}
}
private static DataSize writeRcFileColumnNew(File outputFile, Format format, Compression compression, Type type, Iterator<?> values, Map<String, String> metadata) throws Exception { OutputStreamSliceOutput output = new OutputStreamSliceOutput(new FileOutputStream(outputFile)); AircompressorCodecFactory codecFactory = new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader())); RcFileWriter writer = new RcFileWriter( output, ImmutableList.of(type), format.getVectorEncoding(), compression.getCodecName(), codecFactory, metadata, new DataSize(100, KILOBYTE), // use a smaller size to create more row groups new DataSize(200, KILOBYTE), true); BlockBuilder blockBuilder = type.createBlockBuilder(null, 1024); while (values.hasNext()) { Object value = values.next(); writeValue(type, blockBuilder, value); } writer.write(new Page(blockBuilder.build())); writer.close(); writer.validate(new FileRcFileDataSource(outputFile)); return new DataSize(output.size(), BYTE); }
/**
 * Returns the size as an int, failing fast if it no longer fits.
 */
@Override
public int size()
{
    long totalBytes = longSize();
    return checkedCast(totalBytes);
}
/**
 * Returns the retained memory footprint: the shallow instance size plus
 * whatever the wrapped output retains.
 */
@Override
public long getRetainedSizeInBytes()
{
    return INSTANCE_SIZE + output.getRetainedSize();
}
private static DataSize writeRcFileColumnNew(File outputFile, Format format, Compression compression, Type type, Iterator<?> values, Map<String, String> metadata) throws Exception { OutputStreamSliceOutput output = new OutputStreamSliceOutput(new FileOutputStream(outputFile)); AircompressorCodecFactory codecFactory = new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader())); RcFileWriter writer = new RcFileWriter( output, ImmutableList.of(type), format.getVectorEncoding(), compression.getCodecName(), codecFactory, metadata, new DataSize(100, KILOBYTE), // use a smaller size to create more row groups new DataSize(200, KILOBYTE), true); BlockBuilder blockBuilder = type.createBlockBuilder(null, 1024); while (values.hasNext()) { Object value = values.next(); writeValue(type, blockBuilder, value); } writer.write(new Page(blockBuilder.build())); writer.close(); writer.validate(new FileRcFileDataSource(outputFile)); return new DataSize(output.size(), BYTE); }
/**
 * Returns the size as an int, failing fast if it no longer fits.
 */
@Override
public int size()
{
    long totalBytes = longSize();
    return checkedCast(totalBytes);
}
/**
 * Returns the retained memory footprint: the shallow instance size plus
 * whatever the wrapped output retains.
 */
@Override
public long getRetainedSizeInBytes()
{
    return INSTANCE_SIZE + output.getRetainedSize();
}
new OutputStreamSliceOutput(this.outputStream), fileColumnTypes, rcFileEncoding,
private static DataSize writeRcFileColumnNew(File outputFile, Format format, Compression compression, Type type, Iterator<?> values, Map<String, String> metadata) throws Exception { OutputStreamSliceOutput output = new OutputStreamSliceOutput(new FileOutputStream(outputFile)); AircompressorCodecFactory codecFactory = new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader())); RcFileWriter writer = new RcFileWriter( output, ImmutableList.of(type), format.getVectorEncoding(), compression.getCodecName(), codecFactory, metadata, new DataSize(100, KILOBYTE), // use a smaller size to create more row groups new DataSize(200, KILOBYTE), true); BlockBuilder blockBuilder = type.createBlockBuilder(null, 1024); while (values.hasNext()) { Object value = values.next(); writeValue(type, blockBuilder, value); } writer.write(new Page(blockBuilder.build())); writer.close(); writer.validate(new FileRcFileDataSource(outputFile)); return new DataSize(output.size(), BYTE); }
/**
 * Returns the number of bytes written so far.
 */
@Override
public long size()
{
    // delegate to the wrapped slice output's 64-bit counter
    return output.longSize();
}
/**
 * Flushes and closes the underlying output stream.
 */
@Override
public void close()
        throws IOException
{
    output.close();
}
}
/**
 * Returns the retained memory footprint: the shallow instance size plus
 * whatever the wrapped output retains.
 */
@Override
public long getRetainedSizeInBytes()
{
    return INSTANCE_SIZE + output.getRetainedSize();
}
SliceOutput sliceOutput = new OutputStreamSliceOutput(output); writeSerializedPages(sliceOutput, serializedPages);
private static DataSize writeRcFileColumnNew(File outputFile, Format format, Compression compression, Type type, Iterator<?> values, Map<String, String> metadata) throws Exception { OutputStreamSliceOutput output = new OutputStreamSliceOutput(new FileOutputStream(outputFile)); AircompressorCodecFactory codecFactory = new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader())); RcFileWriter writer = new RcFileWriter( output, ImmutableList.of(type), format.getVectorEncoding(), compression.getCodecName(), codecFactory, metadata, new DataSize(100, KILOBYTE), // use a smaller size to create more row groups new DataSize(200, KILOBYTE), true); BlockBuilder blockBuilder = type.createBlockBuilder(null, 1024); while (values.hasNext()) { Object value = values.next(); writeValue(type, blockBuilder, value); } writer.write(new Page(blockBuilder.build())); writer.close(); writer.validate(new FileRcFileDataSource(outputFile)); return new DataSize(output.size(), BYTE); }
/**
 * Returns the number of bytes written so far.
 */
@Override
public long size()
{
    // delegate to the wrapped slice output's 64-bit counter
    return output.longSize();
}
/**
 * Flushes and closes the underlying output stream.
 */
@Override
public void close()
        throws IOException
{
    output.close();
}
}
/**
 * Returns the retained memory footprint: the shallow instance size plus
 * whatever the wrapped output retains.
 */
@Override
public long getRetainedSizeInBytes()
{
    return INSTANCE_SIZE + output.getRetainedSize();
}
/**
 * Serializes the given pages and appends them to the spill file, updating
 * the spill accounting (in-memory size, spill context, global stats) as it goes.
 *
 * @param pageIterator pages to spill, consumed in iteration order
 * @throws PrestoException if writing to the spill file fails
 */
private void writePages(Iterator<Page> pageIterator)
{
    checkState(writable, "Spilling no longer allowed. The spiller has been made non-writable on first read for subsequent reads to be consistent");
    // append to the target file; the buffered output is closed (and flushed) automatically
    try (SliceOutput sliceOutput = new OutputStreamSliceOutput(targetFile.newOutputStream(APPEND), BUFFER_SIZE)) {
        while (pageIterator.hasNext()) {
            Page next = pageIterator.next();
            spilledPagesInMemorySize += next.getSizeInBytes();
            SerializedPage serialized = serde.serialize(next);
            long serializedSizeInBytes = serialized.getSizeInBytes();
            localSpillContext.updateBytes(serializedSizeInBytes);
            spillerStats.addToTotalSpilledBytes(serializedSizeInBytes);
            writeSerializedPage(sliceOutput, serialized);
        }
    }
    catch (UncheckedIOException | IOException e) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, "Failed to spill pages", e);
    }
}