/**
 * Validates that every field in the batch's definition carries a non-empty name.
 *
 * @param b the writable batch whose serialized field definitions are checked
 * @throws RuntimeException if any field lacks a name part or has an empty name
 */
private static void validate(WritableBatch b) {
  for (SerializedField f : b.getDef().getFieldList()) {
    if (!f.hasNamePart()) {
      throw new RuntimeException("Field is missing name");
    }
    // isEmpty() instead of equals("") — same check, clearer intent.
    if (f.getNamePart().getName().isEmpty()) {
      throw new RuntimeException("Field name is empty");
    }
  }
}
/**
 * Creates a {@code Field} from its serialized representation.
 *
 * @param serField the serialized field; must carry a non-empty name part
 * @return the field materialized via {@code TypeHelper}
 * @throws RuntimeException if the serialized field has no name part or an empty name
 */
public static Field create(SerializedField serField) {
  if (!serField.hasNamePart() || serField.getNamePart().getName().isEmpty()) {
    // Original threw a bare RuntimeException with no message, which is
    // impossible to diagnose from a stack trace; include the offending field.
    throw new RuntimeException("Cannot create field: missing or empty name in " + serField);
  }
  return TypeHelper.getFieldForSerializedField(serField);
}
/**
 * Builds a field-name to major-type map from the fields of a record batch definition.
 *
 * @param batchDef the batch definition whose field list is read
 * @return a map from each field's name to its major type
 */
private static Map<String, MajorType> extractSchema(final RecordBatchDef batchDef) {
  final Map<String, MajorType> schema = Maps.newHashMap();
  for (final SerializedField serialized : batchDef.getFieldList()) {
    schema.put(serialized.getNamePart().getName(), serialized.getMajorType());
  }
  return schema;
}
}
final String fieldName = field.getNamePart().getName();
mergeNamePart(other.getNamePart());
public static Field getFieldForSerializedField(SerializedField serializedField) { String name = serializedField.getNamePart().getName(); org.apache.arrow.vector.types.Types.MinorType arrowMinorType = getArrowMinorType(serializedField.getMajorType().getMinorType()); switch(serializedField.getMajorType().getMinorType()) { Preconditions.checkState(childList.size() > 0, "children should start with validity vector buffer"); SerializedField bits = childList.get(0); Preconditions.checkState(bits.getNamePart().getName().equals("$bits$"), "children should start with validity vector buffer: %s", childList); for (int i = 1; i < childList.size(); i++) { SerializedField child = childList.get(i);
/**
 * Computes the number of buffers consumed by a given serialized field.
 *
 * @param field serialized field
 * @param buffers total buffers in the batch
 * @param buffersStart starting buffer index for the passed field
 * @return number of buffers belonging to the field
 * @throws IllegalStateException if the buffers starting at {@code buffersStart} cannot
 *         cover the field's declared buffer length
 */
private static int fieldBuffersCount(SerializedField field, ByteBuf[] buffers, final int buffersStart) {
  int totalBufferWidth = 0;
  int lastIndex = buffersStart;
  // Accumulate readable bytes from buffersStart until the field's declared length is
  // covered. The bound must be buffers.length: the original used
  // buffersStart + buffers.length, which lets lastIndex run past the end of the
  // array whenever buffersStart > 0 (ArrayIndexOutOfBoundsException risk).
  while (totalBufferWidth < field.getBufferLength() && lastIndex < buffers.length) {
    final ByteBuf buf = buffers[lastIndex];
    totalBufferWidth += buf.readableBytes();
    ++lastIndex;
  }
  if (totalBufferWidth != field.getBufferLength()) {
    throw new IllegalStateException("not enough buffers for field "
        + field.getNamePart().getName() + " of type " + field.getMajorType());
  }
  return lastIndex - buffersStart;
}
/**
 * Verifies that the two columns of {@code type_changes.json} come back in their
 * original order ("a" then "b") in the record batch definition.
 */
@Test
public void testColumnOrderingWithUnionVector() throws Exception {
  List<QueryDataBatch> batches = null;
  try {
    batches = testRunAndReturn(QueryType.SQL, "SELECT * FROM cp.\"type_changes.json\"");
    final RecordBatchDef def = batches.get(0).getHeader().getDef();
    assertEquals(2, def.getFieldCount());
    assertEquals("a", def.getField(0).getNamePart().getName());
    assertEquals("b", def.getField(1).getNamePart().getName());
  } finally {
    // Always release batch buffers, even when an assertion above fails.
    if (batches != null) {
      for (final QueryDataBatch batch : batches) {
        batch.release();
      }
    }
  }
}
/**
 * Writes a {@code SerializedField} message to the given protostuff output, emitting
 * only the fields that are present on the message.
 *
 * NOTE(review): protostuff-schema style generated code — field number 6 is skipped
 * (numbers run 4, 5, 7); presumably a removed/reserved proto field. Do not renumber.
 */
public void writeTo(io.protostuff.Output output, com.dremio.exec.proto.UserBitShared.SerializedField message) throws java.io.IOException
{
  // 1: major type (nested message)
  if(message.hasMajorType())
    output.writeObject(1, message.getMajorType(), com.dremio.common.types.SchemaTypeProtos.MajorType.WRITE, false);
  // 2: name part (nested message)
  if(message.hasNamePart())
    output.writeObject(2, message.getNamePart(), com.dremio.exec.proto.SchemaUserBitShared.NamePart.WRITE, false);
  // 3: repeated child fields (last arg true marks the field as repeated)
  for(com.dremio.exec.proto.UserBitShared.SerializedField child : message.getChildList())
    output.writeObject(3, child, com.dremio.exec.proto.SchemaUserBitShared.SerializedField.WRITE, true);
  // 4, 5, 7: scalar counts/lengths, written only when set
  if(message.hasValueCount())
    output.writeInt32(4, message.getValueCount(), false);
  if(message.hasVarByteLength())
    output.writeInt32(5, message.getVarByteLength(), false);
  if(message.hasBufferLength())
    output.writeInt32(7, message.getBufferLength(), false);
}
public boolean isInitialized(com.dremio.exec.proto.UserBitShared.SerializedField message)
/**
 * Collects the set of field names appearing in the batch header's definition.
 *
 * @param batch the query batch whose header definition is read
 * @return the set of field names found in the definition
 */
private static Set<String> extractSchema(QueryWritableBatch batch) {
  final Set<String> names = Sets.newHashSet();
  for (final SerializedField field : batch.getHeader().getDef().getFieldList()) {
    names.add(field.getNamePart().getName());
  }
  return names;
}
/**
 * Populates the wrapped vector from a serialized field's metadata and data buffer.
 *
 * The metadata's name part must match the vector's name. The vector is cleared,
 * then handed a retained slice of the incoming buffer so it owns its own reference
 * independent of the caller's buffer lifetime.
 *
 * NOTE(review): the length check is a plain {@code assert}, so it is skipped unless
 * assertions are enabled — confirm mismatched lengths cannot occur in production,
 * or consider promoting it to a Preconditions check.
 */
public void load(SerializedField metadata, ArrowBuf buffer) {
  Preconditions.checkArgument(vector.name.equals(metadata.getNamePart().getName()),
      "The field %s doesn't match the provided metadata %s.", vector.name, metadata);
  final int valueCount = metadata.getValueCount();
  // Expected byte length derived from the value count via the vector's own sizing rule.
  final int expectedLength = vector.getValidityBufferSizeFromCount(valueCount);
  final int actualLength = metadata.getBufferLength();
  assert expectedLength == actualLength: "expected and actual buffer sizes do not match";
  // Drop any existing state before adopting the new buffer.
  vector.clear();
  // Slice to exactly the declared length, mark it fully written, and retain so the
  // vector holds its own reference count on the underlying memory.
  vector.valueBuffer = buffer.slice(0, actualLength);
  vector.valueBuffer.writerIndex(actualLength);
  vector.valueBuffer.retain();
  vector.valueCount = valueCount;
}