private static Map<ColumnDescriptor, DictionaryDescriptor> getDictionaries(BlockMetaData blockMetadata, ParquetDataSource dataSource, Map<List<String>, RichColumnDescriptor> descriptorsByPath, TupleDomain<ColumnDescriptor> parquetTupleDomain)
{
    ImmutableMap.Builder<ColumnDescriptor, DictionaryDescriptor> dictionaries = ImmutableMap.builder();
    for (ColumnChunkMetaData columnMetaData : blockMetadata.getColumns()) {
        RichColumnDescriptor descriptor = descriptorsByPath.get(Arrays.asList(columnMetaData.getPath().toArray()));
        if (descriptor != null) {
            if (isOnlyDictionaryEncodingPages(columnMetaData.getEncodings()) && isColumnPredicate(descriptor, parquetTupleDomain)) {
                int totalSize = toIntExact(columnMetaData.getTotalSize());
                byte[] buffer = new byte[totalSize];
                dataSource.readFully(columnMetaData.getStartingPos(), buffer);
                Optional<DictionaryPage> dictionaryPage = readDictionaryPage(buffer, columnMetaData.getCodec());
                dictionaries.put(descriptor, new DictionaryDescriptor(descriptor, dictionaryPage));
                break;
            }
        }
    }
    return dictionaries.build();
}
private ColumnChunk readPrimitive(PrimitiveField field)
        throws IOException
{
    ColumnDescriptor columnDescriptor = field.getDescriptor();
    PrimitiveColumnReader columnReader = columnReaders[field.getId()];
    if (columnReader.getPageReader() == null) {
        validateParquet(currentBlockMetadata.getRowCount() > 0, "Row group has 0 rows");
        ColumnChunkMetaData metadata = getColumnChunkMetaData(columnDescriptor);
        long startingPosition = metadata.getStartingPos();
        int totalSize = toIntExact(metadata.getTotalSize());
        byte[] buffer = allocateBlock(totalSize);
        dataSource.readFully(startingPosition, buffer);
        ColumnChunkDescriptor descriptor = new ColumnChunkDescriptor(columnDescriptor, metadata, totalSize);
        ParquetColumnChunk columnChunk = new ParquetColumnChunk(descriptor, buffer, 0);
        columnReader.setPageReader(columnChunk.readAllPages());
    }
    return columnReader.readPrimitive(field);
}
/**
 * @return the compressed size of all columns
 */
public long getCompressedSize() {
  long totalSize = 0;
  for (ColumnChunkMetaData col : getColumns()) {
    totalSize += col.getTotalSize();
  }
  return totalSize;
}
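The getCompressedSize() helper above simply sums getTotalSize() over every column chunk of one row group. A minimal, self-contained sketch of the same idea applied to a whole file is shown below; it is an illustration, not code from the original sources, and it assumes the org.apache.parquet packages and the (deprecated but still available) ParquetFileReader.readFooter(Configuration, Path) overload.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.parquet.hadoop.ParquetFileReader;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;
import org.apache.parquet.hadoop.metadata.ParquetMetadata;

public class TotalCompressedSize {
  public static void main(String[] args) throws Exception {
    // Read only the footer; no column data is materialized.
    ParquetMetadata footer = ParquetFileReader.readFooter(new Configuration(), new Path(args[0]));
    long total = 0;
    for (BlockMetaData block : footer.getBlocks()) {           // one entry per row group
      for (ColumnChunkMetaData column : block.getColumns()) {  // one entry per column chunk
        total += column.getTotalSize();                        // compressed, on-disk bytes
      }
    }
    System.out.println("total compressed size: " + total + " bytes");
  }
}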
for (ColumnChunkMetaData column : block.getColumns()) {
  long off = column.getFirstDataPageOffset();
  long len = column.getTotalSize();
  begin = Math.min(begin, off);
  end = Math.max(end, off + len);
/**
 * Get the standard deviation of each column's chunk size across all row groups.
 *
 * @param avgSize the average chunk size of each column, as returned by getAvgColumnChunkSize()
 * @return the per-column standard deviation of chunk sizes
 */
@Override
public double[] getColumnChunkSizeStdDev(double[] avgSize) {
  double[] dev = new double[this.columnCount];
  for (BlockMetaData block : getBlocks()) {
    int i = 0;
    for (ColumnChunkMetaData column : block.getColumns()) {
      dev[i] += Math.pow(column.getTotalSize() - avgSize[i], 2);
      i++;
    }
  }
  long blockCount = this.getRowGroupCount();
  for (int i = 0; i < this.columnCount; ++i) {
    dev[i] = Math.sqrt(dev[i] / blockCount);
  }
  return dev;
}
for (ColumnChunkMetaData column : block.getColumns()) {
  long offset = column.getFirstDataPageOffset();
  long size = column.getTotalSize();
  begin = Math.min(begin, offset);
  end = Math.max(end, offset + size);
/**
 * Get the average column chunk size across all row groups.
 *
 * @return the per-column average chunk size
 */
@Override
public double[] getAvgColumnChunkSize() {
  double[] sum = new double[this.columnCount];
  for (BlockMetaData block : getBlocks()) {
    int i = 0;
    for (ColumnChunkMetaData column : block.getColumns()) {
      sum[i] += column.getTotalSize();
      i++;
    }
  }
  long blockCount = this.getRowGroupCount();
  for (int i = 0; i < this.columnCount; ++i) {
    sum[i] /= blockCount;
  }
  return sum;
}
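The getColumnChunkSizeStdDev and getAvgColumnChunkSize methods above walk the row groups with a running column index. A self-contained sketch that computes both statistics in two passes directly over a list of BlockMetaData follows; the class and method names are illustrative (not from the original sources), and it assumes every row group lists its column chunks in the same order, which holds within a single Parquet file.

import java.util.List;
import org.apache.parquet.hadoop.metadata.BlockMetaData;
import org.apache.parquet.hadoop.metadata.ColumnChunkMetaData;

public final class ChunkSizeStats {
  /** Returns {mean[], stdDev[]} of per-column chunk sizes across all row groups. */
  public static double[][] meanAndStdDev(List<BlockMetaData> blocks) {
    int columnCount = blocks.get(0).getColumns().size();
    double[] mean = new double[columnCount];
    double[] dev = new double[columnCount];

    // Pass 1: per-column totals, then divide by the number of row groups.
    for (BlockMetaData block : blocks) {
      List<ColumnChunkMetaData> columns = block.getColumns();
      for (int i = 0; i < columnCount; i++) {
        mean[i] += columns.get(i).getTotalSize();
      }
    }
    for (int i = 0; i < columnCount; i++) {
      mean[i] /= blocks.size();
    }

    // Pass 2: squared deviations from the mean, then the square root.
    for (BlockMetaData block : blocks) {
      List<ColumnChunkMetaData> columns = block.getColumns();
      for (int i = 0; i < columnCount; i++) {
        double diff = columns.get(i).getTotalSize() - mean[i];
        dev[i] += diff * diff;
      }
    }
    for (int i = 0; i < columnCount; i++) {
      dev[i] = Math.sqrt(dev[i] / blocks.size());
    }
    return new double[][] {mean, dev};
  }
}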
private static long end(List<BlockMetaData> blocks, String requestedSchema) {
  MessageType requested = MessageTypeParser.parseMessageType(requestedSchema);
  long length = 0;
  for (BlockMetaData block : blocks) {
    List<ColumnChunkMetaData> columns = block.getColumns();
    for (ColumnChunkMetaData column : columns) {
      if (requested.containsPath(column.getPath().toArray())) {
        length += column.getTotalSize();
      }
    }
  }
  return length;
}
for (ColumnChunkMetaData mc : block.getColumns()) {
  ColumnPath pathKey = mc.getPath();
  BenchmarkCounter.incrementTotalBytes(mc.getTotalSize());
  ColumnDescriptor columnDescriptor = paths.get(pathKey);
  if (columnDescriptor != null) {
    allChunks.add(currentChunks);
    currentChunks.addChunk(new ChunkDescriptor(columnDescriptor, mc, startingPos, (int) mc.getTotalSize()));
private static void showDetails(PrettyPrintWriter out, ColumnChunkMetaData meta, boolean name) {
  long doff = meta.getDictionaryPageOffset();
  long foff = meta.getFirstDataPageOffset();
  long tsize = meta.getTotalSize();
  long usize = meta.getTotalUncompressedSize();
  long count = meta.getValueCount();
  double ratio = usize / (double) tsize;
  String encodings = Joiner.on(',').skipNulls().join(meta.getEncodings());

  if (name) {
    String path = Joiner.on('.').skipNulls().join(meta.getPath());
    out.format("%s: ", path);
  }

  out.format(" %s", meta.getType());
  out.format(" %s", meta.getCodec());
  out.format(" DO:%d", doff);
  out.format(" FPO:%d", foff);
  out.format(" SZ:%d/%d/%.2f", tsize, usize, ratio);
  out.format(" VC:%d", count);
  if (!encodings.isEmpty()) out.format(" ENC:%s", encodings);
  out.println();
}
    isOnlyDictionaryEncodingPages(columnChunkMetaData.getEncodings())) {
  try {
    int totalSize = Ints.checkedCast(columnChunkMetaData.getTotalSize());
    byte[] buffer = new byte[totalSize];
    dataSource.readFully(columnChunkMetaData.getStartingPos(), buffer);
public Block readBlock(ColumnDescriptor columnDescriptor, Type type)
        throws IOException
{
    ParquetColumnReader columnReader = columnReadersMap.get(columnDescriptor);
    if (columnReader.getPageReader() == null) {
        validateParquet(currentBlockMetadata.getRowCount() > 0, "Row group has 0 rows");
        ColumnChunkMetaData metadata = getColumnChunkMetaData(columnDescriptor);
        long startingPosition = metadata.getStartingPos();
        int totalSize = Ints.checkedCast(metadata.getTotalSize());
        byte[] buffer = new byte[totalSize];
        dataSource.readFully(startingPosition, buffer);
        ParquetColumnChunkDescriptor descriptor = new ParquetColumnChunkDescriptor(columnDescriptor, metadata, startingPosition, totalSize);
        ParquetColumnChunk columnChunk = new ParquetColumnChunk(descriptor, buffer, 0, codecFactory);
        columnReader.setPageReader(columnChunk.readAllPages());
    }
    return columnReader.readBlock(type);
}
public ParquetInputSplit getParquetInputSplit(FileStatus fileStatus, String requestedSchema, Map<String, String> readSupportMetadata) throws IOException {
  MessageType requested = MessageTypeParser.parseMessageType(requestedSchema);
  long length = 0;
  for (BlockMetaData block : this.getRowGroups()) {
    List<ColumnChunkMetaData> columns = block.getColumns();
    for (ColumnChunkMetaData column : columns) {
      if (requested.containsPath(column.getPath().toArray())) {
        length += column.getTotalSize();
      }
    }
  }

  BlockMetaData lastRowGroup = this.getRowGroups().get(this.getRowGroupCount() - 1);
  long end = lastRowGroup.getStartingPos() + lastRowGroup.getTotalByteSize();

  long[] rowGroupOffsets = new long[this.getRowGroupCount()];
  for (int i = 0; i < rowGroupOffsets.length; i++) {
    rowGroupOffsets[i] = this.getRowGroups().get(i).getStartingPos();
  }

  return new ParquetInputSplit(
      fileStatus.getPath(),
      hdfsBlock.getOffset(),
      end,
      length,
      hdfsBlock.getHosts(),
      rowGroupOffsets);
}
private void addRowGroup(ParquetMetadata parquetMetadata, List<RowGroup> rowGroups, BlockMetaData block) {
  //rowGroup.total_byte_size = ;
  List<ColumnChunkMetaData> columns = block.getColumns();
  List<ColumnChunk> parquetColumns = new ArrayList<ColumnChunk>();
  for (ColumnChunkMetaData columnMetaData : columns) {
    ColumnChunk columnChunk = new ColumnChunk(columnMetaData.getFirstDataPageOffset()); // verify this is the right offset
    columnChunk.file_path = block.getPath(); // they are in the same file for now
    columnChunk.meta_data = new parquet.format.ColumnMetaData(
        getType(columnMetaData.getType()),
        toFormatEncodings(columnMetaData.getEncodings()),
        Arrays.asList(columnMetaData.getPath().toArray()),
        columnMetaData.getCodec().getParquetCompressionCodec(),
        columnMetaData.getValueCount(),
        columnMetaData.getTotalUncompressedSize(),
        columnMetaData.getTotalSize(),
        columnMetaData.getFirstDataPageOffset());
    columnChunk.meta_data.dictionary_page_offset = columnMetaData.getDictionaryPageOffset();
    if (!columnMetaData.getStatistics().isEmpty()) {
      columnChunk.meta_data.setStatistics(toParquetStatistics(columnMetaData.getStatistics()));
    }
    // columnChunk.meta_data.index_page_offset = ;
    // columnChunk.meta_data.key_value_metadata = ; // nothing yet
    parquetColumns.add(columnChunk);
  }
  RowGroup rowGroup = new RowGroup(parquetColumns, block.getTotalByteSize(), block.getRowCount());
  rowGroups.add(rowGroup);
}
private static void add(ParquetMetadata footer) {
  for (BlockMetaData blockMetaData : footer.getBlocks()) {
    ++blockCount;
    MessageType schema = footer.getFileMetaData().getSchema();
    recordCount += blockMetaData.getRowCount();
    List<ColumnChunkMetaData> columns = blockMetaData.getColumns();
    for (ColumnChunkMetaData columnMetaData : columns) {
      ColumnDescriptor desc = schema.getColumnDescription(columnMetaData.getPath().toArray());
      add(
          desc,
          columnMetaData.getValueCount(),
          columnMetaData.getTotalSize(),
          columnMetaData.getTotalUncompressedSize(),
          columnMetaData.getEncodings(),
          columnMetaData.getStatistics());
    }
  }
}