public static MessageColumnIO getColumnIO(MessageType fileSchema, MessageType requestedSchema) {
  return (new ColumnIOFactory()).getColumnIO(requestedSchema, fileSchema, true);
}

public static List<PrimitiveColumnIO> getColumns(MessageType fileSchema, MessageType requestedSchema) {
  return (new ColumnIOFactory()).getColumnIO(requestedSchema, fileSchema, true).getLeaves();
}
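// Both helpers funnel into the same ColumnIOFactory call: the requested (projection)
// schema is resolved against the file schema, with strict = true rejecting incompatible
// projections, and getLeaves() exposes the primitive columns that will actually be read.
// A minimal sketch of that pattern, assuming the pre-Apache parquet.* package layout
// these snippets use (adjust to org.apache.parquet.* on newer releases); the Document
// schema is made up for illustration.
import parquet.io.ColumnIOFactory;
import parquet.io.MessageColumnIO;
import parquet.io.PrimitiveColumnIO;
import parquet.schema.MessageType;
import parquet.schema.MessageTypeParser;

public class ProjectionSketch {
  public static void main(String[] args) {
    // Full schema as written in the file.
    MessageType fileSchema = MessageTypeParser.parseMessageType(
        "message Document { required int64 id; optional binary name; }");
    // Projection that reads only the id column.
    MessageType requestedSchema = MessageTypeParser.parseMessageType(
        "message Document { required int64 id; }");

    // strict = true: an incompatible projection fails instead of being ignored.
    MessageColumnIO columnIO =
        new ColumnIOFactory().getColumnIO(requestedSchema, fileSchema, true);

    // The leaves are the primitive columns the readers will actually touch.
    for (PrimitiveColumnIO leaf : columnIO.getLeaves()) {
      System.out.println(leaf.getColumnDescriptor());
    }
  }
}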
@Override
public void prepareForWrite(RecordConsumer recordConsumer) {
  final MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
  this.parquetWriteProtocol = new ParquetWriteProtocol(recordConsumer, columnIO, thriftStruct);
}
private void initStore() {
  // we don't want this number to be too small
  // ideally we divide the block equally across the columns
  // it is unlikely all columns are going to be the same size.
  int initialBlockBufferSize = max(MINIMUM_BUFFER_SIZE, blockSize / schema.getColumns().size() / 5);
  pageStore = new ColumnChunkPageWriteStore(compressor, schema, initialBlockBufferSize);
  // we don't want this number to be too small either
  // ideally, slightly bigger than the page size, but not bigger than the block buffer
  int initialPageBufferSize = max(MINIMUM_BUFFER_SIZE, min(pageSize + pageSize / 10, initialBlockBufferSize));
  store = new ColumnWriteStoreImpl(pageStore, pageSize, initialPageBufferSize, dictionaryPageSize, enableDictionary, writerVersion);
  MessageColumnIO columnIO = new ColumnIOFactory(validating).getColumnIO(schema);
  writeSupport.prepareForWrite(columnIO.getRecordWriter(store));
}
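// To make that sizing concrete, the same arithmetic worked through with typical
// defaults. The concrete numbers (64 KiB minimum buffer, 128 MiB block, 1 MiB pages,
// 10 columns) are assumptions for illustration; the snippet does not show the actual
// constant values.
public class BufferSizingSketch {
  public static void main(String[] args) {
    int minimumBufferSize = 64 * 1024;  // assumed value of MINIMUM_BUFFER_SIZE
    int blockSize = 128 * 1024 * 1024;  // assumed 128 MiB row group
    int pageSize = 1024 * 1024;         // assumed 1 MiB pages
    int columnCount = 10;               // assumed column count

    // 128 MiB / 10 columns / 5 ~= 2.56 MiB initial buffer per column chunk
    int initialBlockBufferSize = Math.max(minimumBufferSize, blockSize / columnCount / 5);

    // min(pageSize + 10%, block buffer) ~= 1.1 MiB initial buffer per page
    int initialPageBufferSize = Math.max(minimumBufferSize,
        Math.min(pageSize + pageSize / 10, initialBlockBufferSize));

    System.out.println(initialBlockBufferSize); // 2684354
    System.out.println(initialPageBufferSize);  // 1153433
  }
}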
@Override
public void prepareForWrite(RecordConsumer recordConsumer) {
  final MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
  this.parquetWriteProtocol = new ParquetWriteProtocol(recordConsumer, columnIO, thriftStruct);
  thriftWriteSupport.prepareForWrite(recordConsumer);
}
private static MessageColumnIO newColumnFactory(MessageType schema) {
  return new ColumnIOFactory().getColumnIO(schema);
}

private static void read(RecordReader<Object> recordReader, int count, MessageType schema) {
private void initStore() {
  pageStore = new ColumnChunkPageWriteStore(compressor, schema, pageSize);
  columnStore = parquetProperties.newColumnWriteStore(schema, pageStore, pageSize);
  MessageColumnIO columnIO = new ColumnIOFactory(validating).getColumnIO(schema);
  writeSupport.prepareForWrite(columnIO.getRecordWriter(columnStore));
}
    footer.getFileMetaData().getSchema(), mappingConfiguration);
this.columnIo = new ColumnIOFactory().getColumnIO(
    materializer.getMaterializeSchema(), footer.getFileMetaData().getSchema());
@Test
public void testPushParser() {
  MemPageStore memPageStore = new MemPageStore();
  MemColumnWriteStore columns = new MemColumnWriteStore(memPageStore, 800);
  MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
  new GroupWriter(columnIO.getRecordWriter(columns), schema).write(r1);
  columns.flush();
  final Deque<String> expectations = new ArrayDeque<String>();
  for (String string : expectedEventsForR1) {
    expectations.add(string);
  }
  RecordReader<Void> recordReader = columnIO.getRecordReader(memPageStore,
      new ExpectationValidatingConverter(expectations, schema));
  recordReader.read();
}
MemColumnWriteStore columns = new MemColumnWriteStore(memPageStore, 800);
ColumnIOFactory columnIOFactory = new ColumnIOFactory(true);

MessageColumnIO columnIO = new ColumnIOFactory().getColumnIO(schema);
GroupWriter groupWriter = new GroupWriter(columnIO.getRecordWriter(columns), schema);
groupWriter.write(r1);
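// Continuing that write path: following the flush-then-read shape of the
// testPushParser example above, the buffered columns can be flushed into the page
// store and the record read back. This is a sketch reusing columns, memPageStore,
// columnIO, and schema from the snippet above; GroupRecordConverter is the
// example-module RecordMaterializer that rebuilds records as Groups, and whether it
// is on your classpath depends on having parquet's example classes available.
columns.flush(); // push the buffered column data into memPageStore as pages
RecordReader<Group> recordReader =
    columnIO.getRecordReader(memPageStore, new GroupRecordConverter(schema));
Group readBack = recordReader.read(); // should reproduce r1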