public OutputStreamOrcDataSink(OutputStream outputStream)
{
    this.output = new OutputStreamSliceOutput(requireNonNull(outputStream, "outputStream is null"));
}
new OutputStreamSliceOutput(this.outputStream), fileColumnTypes, rcFileEncoding,
SliceOutput sliceOutput = new OutputStreamSliceOutput(output);
writeSerializedPages(sliceOutput, serializedPages);
private void writePages(Iterator<Page> pageIterator)
{
    checkState(writable, "Spilling no longer allowed. The spiller has been made non-writable on first read for subsequent reads to be consistent");
    // append to the spill file through a SliceOutput buffered with BUFFER_SIZE
    try (SliceOutput output = new OutputStreamSliceOutput(targetFile.newOutputStream(APPEND), BUFFER_SIZE)) {
        while (pageIterator.hasNext()) {
            Page page = pageIterator.next();
            spilledPagesInMemorySize += page.getSizeInBytes();
            SerializedPage serializedPage = serde.serialize(page);
            long pageSize = serializedPage.getSizeInBytes();
            // account for the spilled bytes in the local spill context and the global spiller stats
            localSpillContext.updateBytes(pageSize);
            spillerStats.addToTotalSpilledBytes(pageSize);
            writeSerializedPage(output, serializedPage);
        }
    }
    catch (UncheckedIOException | IOException e) {
        throw new PrestoException(GENERIC_INTERNAL_ERROR, "Failed to spill pages", e);
    }
}
public PrestoRcFileFormatWriter(File targetFile, List<Type> types, RcFileEncoding encoding, HiveCompressionCodec compressionCodec)
        throws IOException
{
    writer = new RcFileWriter(
            new OutputStreamSliceOutput(new FileOutputStream(targetFile)),
            types,
            encoding,
            compressionCodec.getCodec().map(Class::getName),
            new AircompressorCodecFactory(new HadoopCodecFactory(getClass().getClassLoader())),
            ImmutableMap.of(),
            true);
}
private static DataSize writeRcFileColumnNew(File outputFile, Format format, Compression compression, Type type, Iterator<?> values, Map<String, String> metadata)
        throws Exception
{
    OutputStreamSliceOutput output = new OutputStreamSliceOutput(new FileOutputStream(outputFile));
    AircompressorCodecFactory codecFactory = new AircompressorCodecFactory(new HadoopCodecFactory(RcFileTester.class.getClassLoader()));
    RcFileWriter writer = new RcFileWriter(
            output,
            ImmutableList.of(type),
            format.getVectorEncoding(),
            compression.getCodecName(),
            codecFactory,
            metadata,
            new DataSize(100, KILOBYTE), // use a smaller size to create more row groups
            new DataSize(200, KILOBYTE),
            true);

    BlockBuilder blockBuilder = type.createBlockBuilder(null, 1024);
    while (values.hasNext()) {
        Object value = values.next();
        writeValue(type, blockBuilder, value);
    }
    writer.write(new Page(blockBuilder.build()));
    writer.close();

    // re-read the file to validate what was written
    writer.validate(new FileRcFileDataSource(outputFile));

    // output.size() is the total number of bytes written to the file
    return new DataSize(output.size(), BYTE);
}
private void assertEncoding(Consumer<SliceOutput> operations, int offset, byte... output)
        throws IOException
{
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    try (SliceOutput sliceOutput = new OutputStreamSliceOutput(byteArrayOutputStream, 16384)) {
        sliceOutput.writeZero(offset);
        operations.accept(sliceOutput);
        assertEquals(sliceOutput.size(), offset + output.length);
    }
    byte[] expected = new byte[offset + output.length];
    System.arraycopy(output, 0, expected, offset, output.length);
    assertEquals(byteArrayOutputStream.toByteArray(), expected);
}
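All of the snippets above share one pattern: wrap a destination OutputStream in an OutputStreamSliceOutput (optionally with an explicit buffer size), write through the SliceOutput API, and close the sink so buffered bytes reach the underlying stream. The sketch below is a minimal, self-contained illustration of that pattern, not code from the snippets above: the file name and the values written are invented for illustration, and it assumes the classes live in Airlift's io.airlift.slice package and that SliceOutput exposes writeInt and writeBytes alongside the writeZero and size() calls seen above.

import io.airlift.slice.OutputStreamSliceOutput;
import io.airlift.slice.SliceOutput;

import java.io.FileOutputStream;
import java.io.IOException;

import static java.nio.charset.StandardCharsets.UTF_8;

public final class SliceOutputExample
{
    private SliceOutputExample() {}

    public static void main(String[] args)
            throws IOException
    {
        // "example.bin" is an illustrative destination; any OutputStream works
        // (FileOutputStream, a spill file stream, ByteArrayOutputStream, ...).
        // The second argument is the internal buffer size, as in the snippets above.
        try (SliceOutput output = new OutputStreamSliceOutput(new FileOutputStream("example.bin"), 16384)) {
            output.writeInt(42);                        // fixed-width primitive
            output.writeBytes("hello".getBytes(UTF_8)); // raw bytes
            output.writeZero(4);                        // zero padding, as used in assertEncoding above
            // size() reports the number of bytes written through this sink so far
            System.out.println("bytes written: " + output.size());
        }
        // closing the sink flushes its buffer to the wrapped stream
    }
}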