/**
 * Returns the pre-decoded column vectors stored for the given column.
 *
 * @param colIx index into {@code columnVectors}
 * @return the vectors for the column
 * @throws AssertionError if no vector data is present for the column
 */
public List<ColumnVector> getColumnVectors(int colIx) {
  if (hasVectors(colIx)) {
    return columnVectors[colIx];
  }
  throw new AssertionError("No data for column " + colIx);
}
/**
 * Returns the pre-decoded column vectors stored for the given column.
 *
 * @param colIx index into {@code columnVectors}
 * @return the vectors for the column
 * @throws AssertionError if no vector data is present for the column
 */
public List<ColumnVector> getColumnVectors(int colIx) {
  if (!hasVectors(colIx)) {
    throw new AssertionError("No data for column " + colIx);
  }
  return columnVectors[colIx];
}
}
for (int childIx = 0; childIx < childCount; ++childIx) { int batchColIx = children.get(childIx).getId(); if (!batch.hasData(batchColIx) && !batch.hasVectors(batchColIx)) { if (LOG.isDebugEnabled()) { LOG.debug("Column at " + childIx + " " + children.get(childIx).getId() for (int schemaChildIx = 0, inclChildIx = -1; schemaChildIx < childCount; ++schemaChildIx) { int batchColIx = children.get(schemaChildIx).getId(); if (!batch.hasData(batchColIx) && !batch.hasVectors(batchColIx)) continue; childReaders[++inclChildIx] = createEncodedTreeReader( schema.getChildren().get(schemaChildIx), encodings, batch, codec, context);
if (batch.hasData(columnIndex)) { streamBuffers = batch.getColumnData(columnIndex); } else if (batch.hasVectors(columnIndex)) { vectors = batch.getColumnVectors(columnIndex); } else {
if (batch.hasData(columnIndex)) { streamBuffers = batch.getColumnData(columnIndex); } else if (batch.hasVectors(columnIndex)) { vectors = batch.getColumnVectors(columnIndex); } else {
public static StructTreeReader createRootTreeReader(TypeDescription[] batchSchemas, List<OrcProto.ColumnEncoding> encodings, OrcEncodedColumnBatch batch, CompressionCodec codec, Context context, final boolean useDecimal64ColumnVectors) throws IOException { // Note: we only look at the schema here to deal with complex types. Somebody has set up the // reader with whatever ideas they had to the schema and we just trust the reader to // produce the CVBs that was asked for. However, we only need to look at top level columns. int includedCount = batch.getColumnsWithDataCount(); if (batchSchemas.length > includedCount) { throw new AssertionError("For " + Arrays.toString(batchSchemas) + ", only received " + includedCount + " columns"); } TreeReader[] childReaders = new TreeReader[batchSchemas.length]; for (int i = 0; i < batchSchemas.length; ++i) { int batchColIx = batchSchemas[i].getId(); if (!batch.hasData(batchColIx) && !batch.hasVectors(batchColIx)) { throw new AssertionError("No data for column " + batchColIx + ": " + batchSchemas[i]); } childReaders[i] = createEncodedTreeReader(batchSchemas[i], encodings, batch, codec, context, useDecimal64ColumnVectors); } // TODO: do we actually need this reader? the caller just extracts child readers. return StructStreamReader.builder() .setColumnIndex(0) .setCompressionCodec(codec) .setColumnEncoding(encodings.get(0)) .setChildReaders(childReaders) .setContext(context) .build(); }
/**
 * Counts the columns for which this batch holds anything to read — either raw stream
 * data or pre-decoded vectors.
 *
 * @return number of columns with data or vectors present
 */
public int getColumnsWithDataCount() {
  int result = 0;
  for (int colIx = 0; colIx < hasData.length; ++colIx) {
    if (hasData(colIx) || hasVectors(colIx)) {
      ++result;
    }
  }
  return result;
}
}