/**
 * Merges all set fields of {@code other} into this builder (standard
 * protobuf-generated merge semantics): singular message fields are merged
 * recursively, scalar fields are overwritten, repeated fields are concatenated.
 *
 * @param other the message to merge from; the default instance is a no-op
 * @return this builder, for chaining
 */
public Builder mergeFrom(com.dremio.exec.proto.ExecRPC.FragmentRecordBatch other) {
  // Merging the default instance changes nothing; bail out early.
  if (other == com.dremio.exec.proto.ExecRPC.FragmentRecordBatch.getDefaultInstance()) return this;
  // query_id is a message field: merge field-by-field rather than replace.
  if (other.hasQueryId()) {
    mergeQueryId(other.getQueryId());
  }
  if (other.hasReceivingMajorFragmentId()) {
    setReceivingMajorFragmentId(other.getReceivingMajorFragmentId());
  }
  // Repeated receiving_minor_fragment_id: if our list is empty, adopt other's
  // backing list directly and clear bit 0x04 (presumably the list-ownership
  // flag for this field, so a later write forces a copy — generated-code
  // convention); otherwise copy-on-write and append.
  if (!other.receivingMinorFragmentId_.isEmpty()) {
    if (receivingMinorFragmentId_.isEmpty()) {
      receivingMinorFragmentId_ = other.receivingMinorFragmentId_;
      bitField0_ = (bitField0_ & ~0x00000004);
    } else {
      ensureReceivingMinorFragmentIdIsMutable();
      receivingMinorFragmentId_.addAll(other.receivingMinorFragmentId_);
    }
    onChanged();
  }
  if (other.hasSendingMajorFragmentId()) {
    setSendingMajorFragmentId(other.getSendingMajorFragmentId());
  }
  if (other.hasSendingMinorFragmentId()) {
    setSendingMinorFragmentId(other.getSendingMinorFragmentId());
  }
  if (other.hasArrowRecordBatch()) {
    setArrowRecordBatch(other.getArrowRecordBatch());
  }
  // Carry over any fields this schema version does not know about.
  this.mergeUnknownFields(other.getUnknownFields());
  return this;
}
size += 1 * getReceivingMinorFragmentIdList().size(); .computeBytesSize(6, arrowRecordBatch_); size += getUnknownFields().getSerializedSize(); memoizedSerializedSize = size; return size;
hash = (19 * hash) + getDescriptorForType().hashCode(); if (hasQueryId()) { hash = (37 * hash) + QUERY_ID_FIELD_NUMBER; hash = (53 * hash) + getQueryId().hashCode(); if (hasReceivingMajorFragmentId()) { hash = (37 * hash) + RECEIVING_MAJOR_FRAGMENT_ID_FIELD_NUMBER; hash = (53 * hash) + getReceivingMajorFragmentId(); if (getReceivingMinorFragmentIdCount() > 0) { hash = (37 * hash) + RECEIVING_MINOR_FRAGMENT_ID_FIELD_NUMBER; hash = (53 * hash) + getReceivingMinorFragmentIdList().hashCode(); if (hasSendingMajorFragmentId()) { hash = (37 * hash) + SENDING_MAJOR_FRAGMENT_ID_FIELD_NUMBER; hash = (53 * hash) + getSendingMajorFragmentId(); if (hasSendingMinorFragmentId()) { hash = (37 * hash) + SENDING_MINOR_FRAGMENT_ID_FIELD_NUMBER; hash = (53 * hash) + getSendingMinorFragmentId(); if (hasArrowRecordBatch()) { hash = (37 * hash) + ARROW_RECORD_BATCH_FIELD_NUMBER; hash = (53 * hash) + getArrowRecordBatch().hashCode(); hash = (29 * hash) + getUnknownFields().hashCode(); memoizedHashCode = hash; return hash;
result = result && (hasQueryId() == other.hasQueryId()); if (hasQueryId()) { result = result && getQueryId() .equals(other.getQueryId()); result = result && (hasReceivingMajorFragmentId() == other.hasReceivingMajorFragmentId()); if (hasReceivingMajorFragmentId()) { result = result && (getReceivingMajorFragmentId() == other.getReceivingMajorFragmentId()); result = result && getReceivingMinorFragmentIdList() .equals(other.getReceivingMinorFragmentIdList()); result = result && (hasSendingMajorFragmentId() == other.hasSendingMajorFragmentId()); if (hasSendingMajorFragmentId()) { result = result && (getSendingMajorFragmentId() == other.getSendingMajorFragmentId()); result = result && (hasSendingMinorFragmentId() == other.hasSendingMinorFragmentId()); if (hasSendingMinorFragmentId()) { result = result && (getSendingMinorFragmentId() == other.getSendingMinorFragmentId()); result = result && (hasArrowRecordBatch() == other.hasArrowRecordBatch()); if (hasArrowRecordBatch()) { result = result && getArrowRecordBatch() .equals(other.getArrowRecordBatch()); getUnknownFields().equals(other.getUnknownFields()); return result;
int size = 0; try { RecordBatch recordBatch = RecordBatch.getRootAsRecordBatch(batch.getHeader().getArrowRecordBatch().asReadOnlyByteBuffer()); if (batch.getBody() == null) { for (VectorWrapper<?> w : container) {
/**
 * Routes an incoming data batch to all receiving minor fragments and acks the
 * sender once every receiver has released its reference.
 *
 * @param fragmentBatch header identifying the query/fragments this batch targets
 * @param body          the batch payload (an ArrowBuf in practice — see cast below)
 * @param sender        channel used to ack (or fail) the RPC
 * @throws RpcException declared by the RPC handler contract
 */
private void handleFragmentRecordBatch(FragmentRecordBatch fragmentBatch, ByteBuf body, ResponseSender sender) throws RpcException {
  final AckSender ack = new AckSender(sender);
  // Grab an extra reference up front so the ack cannot fire while we are still
  // submitting to receivers ("increment so we don't get false returns").
  ack.increment();
  try {
    final IncomingDataBatch batch = new IncomingDataBatch(fragmentBatch, (ArrowBuf) body, ack);
    final int targetCount = fragmentBatch.getReceivingMinorFragmentIdCount();
    // randomize who gets first transfer (and thus ownership) so memory usage
    // is balanced when we're sharing amongst multiple fragments.
    // NOTE(review): nextInt(0) throws IllegalArgumentException — presumably a
    // batch always has at least one receiver; confirm upstream guarantees this.
    final int firstOwner = ThreadLocalRandom.current().nextInt(targetCount);
    // Two submits cover the full range [0, targetCount) starting at firstOwner.
    submit(batch, firstOwner, targetCount);
    submit(batch, 0, firstOwner);
    // decrement the extra reference we grabbed at the top.
    ack.sendOk();
  } catch (IOException | FragmentSetupException e) {
    logger.error("Failure while getting fragment manager. {}",
        QueryIdHelper.getQueryIdentifiers(fragmentBatch.getQueryId(),
            fragmentBatch.getReceivingMajorFragmentId(),
            fragmentBatch.getReceivingMinorFragmentIdList()), e);
    // Drop pending acks and report failure to the sender instead.
    ack.clear();
    sender.send(new Response(RpcType.ACK, Acks.FAIL));
  }
}
/**
 * Builds a writable batch: captures the record batch's buffers and record
 * count, serializes its FlatBuffer metadata, and assembles the
 * FragmentRecordBatch header addressing the given sender/receiver fragments.
 *
 * @param queryId                query this batch belongs to
 * @param sendMajorFragmentId    sending major fragment id
 * @param sendMinorFragmentId    sending minor fragment id
 * @param receiveMajorFragmentId receiving major fragment id
 * @param recordBatch            the Arrow record batch being shipped
 * @param receiveMinorFragmentId one or more receiving minor fragment ids
 */
public FragmentWritableBatch( final QueryId queryId, final int sendMajorFragmentId, final int sendMinorFragmentId, final int receiveMajorFragmentId, ArrowRecordBatch recordBatch, final int... receiveMinorFragmentId){
  this.buffers = recordBatch.getBuffers().toArray(new ByteBuf[0]);
  this.recordCount = recordBatch.getLength();

  // Serialize the record-batch metadata into a FlatBuffer.
  final FlatBufferBuilder metadataBuilder = new FlatBufferBuilder();
  final int rootOffset = recordBatch.writeTo(metadataBuilder);
  metadataBuilder.finish(rootOffset);
  final ByteBuffer serializedBatch = metadataBuilder.dataBuffer();

  final FragmentRecordBatch.Builder headerBuilder = FragmentRecordBatch.newBuilder();
  headerBuilder.setArrowRecordBatch(ByteString.copyFrom(serializedBatch));
  headerBuilder.setQueryId(queryId);
  headerBuilder.setReceivingMajorFragmentId(receiveMajorFragmentId);
  headerBuilder.setSendingMajorFragmentId(sendMajorFragmentId);
  headerBuilder.setSendingMinorFragmentId(sendMinorFragmentId);
  for (final int minorId : receiveMinorFragmentId) {
    headerBuilder.addReceivingMinorFragmentId(minorId);
  }
  this.header = headerBuilder.build();
}
/**
 * Handles arrival of an incoming data batch: rejects it (via the deferred
 * exception, not a throw) when it would exceed the allocator's headroom,
 * otherwise hands a locally-allocated copy to the collector for the sending
 * major fragment.
 *
 * @param incomingBatch the batch delivered by the data tunnel
 * @throws FragmentSetupException if the collector lookup fails
 * @throws IOException            on failure materializing the local batch
 */
public void batchArrived(final IncomingDataBatch incomingBatch) throws FragmentSetupException, IOException {
  // Memory admission check: a batch too large for current headroom is recorded
  // as a deferred OOM error rather than thrown from the RPC thread.
  if(!incomingBatch.checkAcceptance(allocator.getHeadroom())){
    deferredException.addException(UserException.memoryError()
        .message("Out of memory while receiving incoming message. Message size: %d, Current thread allocation: %d, thread limit: %d.",
            incomingBatch.size(), allocator.getAllocatedMemory(), allocator.getLimit())
        .build(logger));
    return;
  }
  // we want to make sure that we only generate local record batch reference in the case that we're not closed.
  // Otherwise we would leak memory.
  try (AutoCloseableLock lock = sharedIncomingBatchLock.open()) {
    if (closed) {
      return;
    }
    final DataCollector collector = collector(incomingBatch.getHeader().getSendingMajorFragmentId());
    // Serialize deliveries per collector; try-with-resources releases the local
    // batch reference once the collector has taken what it needs.
    synchronized (collector) {
      try(final RawFragmentBatch newRawFragmentBatch = incomingBatch.newRawFragmentBatch(allocator)){
        collector.batchArrived(incomingBatch.getHeader().getSendingMinorFragmentId(), newRawFragmentBatch);
      }
    }
  }
}
/**
 * Enqueues an arrived batch, spooling it to disk when we are already in
 * spooling mode, and switches spooling on once the in-memory batch count
 * reaches the configured threshold.
 *
 * @param batch the raw batch to buffer; must originate from the opposite fragment
 */
@Override
protected void enqueueInner(RawFragmentBatch batch) {
  assert batch.getHeader().getSendingMajorFragmentId() == oppositeId;
  logger.debug("Enqueue batch. Current buffer size: {}. Sending fragment: {}", bufferQueue.size(), batch.getHeader().getSendingMajorFragmentId());

  final boolean spooling = isCurrentlySpooling();
  // When spooling, the wrapper is created without in-memory ownership.
  final RawFragmentBatchWrapper wrapped = new RawFragmentBatchWrapper(batch, !spooling);
  currentBatchesInMemory++;
  if (spooling) {
    addBatchForSpooling(wrapped);
  }
  bufferQueue.add(wrapped);
  // Crossing the threshold while not yet spooling flips us into spooling mode.
  if (!spooling && currentBatchesInMemory >= threshold) {
    logger.debug("Buffer size {} greater than threshold {}. Start spooling to disk", currentBatchesInMemory, threshold);
    startSpooling();
  }
}
/** Returns a new Builder pre-populated with this message's fields. */
public Builder toBuilder() {
  final Builder populated = newBuilder(this);
  return populated;
}
/** Returns a fresh, empty Builder for this message type. */
public Builder newBuilderForType() { return newBuilder(); }
/**
 * Returns a Builder initialized with {@code prototype}'s fields.
 */
public static Builder newBuilder(com.dremio.exec.proto.ExecRPC.FragmentRecordBatch prototype) {
/**
 * Protostuff serializer for FragmentRecordBatch. Field numbers 1–6 mirror the
 * message's .proto definition; optional fields are emitted only when present,
 * the repeated minor-fragment-id field is written per element (repeated=true).
 */
public void writeTo(io.protostuff.Output output, com.dremio.exec.proto.ExecRPC.FragmentRecordBatch message) throws java.io.IOException
{
  if(message.hasQueryId())
    // Nested message delegated to its own schema writer.
    output.writeObject(1, message.getQueryId(), com.dremio.exec.proto.SchemaUserBitShared.QueryId.WRITE, false);
  if(message.hasReceivingMajorFragmentId())
    output.writeInt32(2, message.getReceivingMajorFragmentId(), false);
  for(int receivingMinorFragmentId : message.getReceivingMinorFragmentIdList())
    output.writeInt32(3, receivingMinorFragmentId, true);
  if(message.hasSendingMajorFragmentId())
    output.writeInt32(4, message.getSendingMajorFragmentId(), false);
  if(message.hasSendingMinorFragmentId())
    output.writeInt32(5, message.getSendingMinorFragmentId(), false);
  if(message.hasArrowRecordBatch())
    output.writeByteArray(6, message.getArrowRecordBatch().toByteArray(), false);
}
public boolean isInitialized(com.dremio.exec.proto.ExecRPC.FragmentRecordBatch message)
/** Returns the shared immutable default instance of FragmentRecordBatch. */
public com.dremio.exec.proto.ExecRPC.FragmentRecordBatch getDefaultInstanceForType() {
  final com.dremio.exec.proto.ExecRPC.FragmentRecordBatch defaultInstance =
      com.dremio.exec.proto.ExecRPC.FragmentRecordBatch.getDefaultInstance();
  return defaultInstance;
}
/** Returns a Builder initialized with {@code prototype}'s fields. */
public static Builder newBuilder(com.dremio.exec.proto.ExecRPC.FragmentRecordBatch prototype) {
  final Builder seeded = newBuilder();
  // mergeFrom returns the builder itself; keep it as the result.
  return seeded.mergeFrom(prototype);
}

/** Returns a new Builder pre-populated with this message's fields. */
public Builder toBuilder() {
  final Builder populated = newBuilder(this);
  return populated;
}
/**
 * Returns the prototype (default instance) used to parse a response of the
 * given RPC type on this data tunnel.
 *
 * @param rpcType wire value of the RPC type
 * @return the default instance matching {@code rpcType}
 * @throws RpcException declared by the interface contract (not thrown here)
 * @throws UnsupportedOperationException if {@code rpcType} is not one this
 *         connection knows how to decode
 */
@Override
public MessageLite getResponseDefaultInstance(int rpcType) throws RpcException {
  switch (rpcType) {
  case RpcType.ACK_VALUE:
    return Ack.getDefaultInstance();
  case RpcType.REQ_RECORD_BATCH_VALUE:
    return FragmentRecordBatch.getDefaultInstance();
  case RpcType.REQ_STREAM_COMPLETE_VALUE:
    return FragmentStreamComplete.getDefaultInstance();
  default:
    // Include the offending value so protocol mismatches are diagnosable
    // instead of surfacing as a bare, context-free exception.
    throw new UnsupportedOperationException("Unexpected rpcType: " + rpcType);
  }
}
/**
 * Clears the {@code optional bytes arrow_record_batch = 6} field, restoring
 * its default value and dropping its has-bit.
 *
 * @return this builder, for chaining
 */
public Builder clearArrowRecordBatch() {
  // Drop the has-bit (0x20) for field 6, then restore the default bytes value.
  bitField0_ &= ~0x00000020;
  arrowRecordBatch_ = getDefaultInstance().getArrowRecordBatch();
  onChanged();
  return this;
}
/**
 * Allocates a test batch whose buffer is tagged with {@code index} at offset 0,
 * wrapped with a default header and the shared ack sender.
 *
 * @param index value stamped into the first int of the buffer
 * @return the freshly allocated batch
 */
private RawFragmentBatch newBatch(int index) {
  final ArrowBuf tagged = allocator.buffer(batchAllocateSize);
  tagged.setInt(0, index);
  return new RawFragmentBatch(FragmentRecordBatch.getDefaultInstance(), tagged, ackSender);
}
/** Returns a new, empty Builder for FragmentRecordBatch (protostuff schema hook). */
public com.dremio.exec.proto.ExecRPC.FragmentRecordBatch.Builder newMessage()
{
  return com.dremio.exec.proto.ExecRPC.FragmentRecordBatch.newBuilder();
}
/** Maps a field number to its .proto field name. */
public java.lang.String getFieldName(int number)
/**
 * Builds the FragmentHandle identifying the receiver at position {@code index}
 * in the batch's receiving-minor-fragment list.
 *
 * @param batch header carrying query id and receiver fragment ids
 * @param index position into the receiving minor fragment id list
 * @return handle for (queryId, receivingMajorFragmentId, receivingMinorFragmentId[index])
 */
private static FragmentHandle getHandle(FragmentRecordBatch batch, int index) {
  final FragmentHandle.Builder handle = FragmentHandle.newBuilder();
  handle.setQueryId(batch.getQueryId());
  handle.setMajorFragmentId(batch.getReceivingMajorFragmentId());
  handle.setMinorFragmentId(batch.getReceivingMinorFragmentId(index));
  return handle.build();
}
/**
 * Mockito stub standing in for tunnel.sendRecordBatch: tallies the batch's
 * record count against every receiving minor fragment, then releases the
 * batch's buffers the way a real tunnel send would.
 */
@Override
public Void answer(InvocationOnMock invocation) throws Throwable {
  final FragmentWritableBatch batch = (FragmentWritableBatch) invocation.getArguments()[0];
  // Credit the record count to each receiving minor fragment.
  for (int fragId : batch.getHeader().getReceivingMinorFragmentIdList()) {
    rowCountPerFragment[fragId] += batch.getRecordCount();
  }
  // Release buffer references so the test does not leak allocator memory.
  for(ByteBuf b : batch.getBuffers()){
    b.release();
  }
  return null;
}}).when(tunnel).sendRecordBatch(any(FragmentWritableBatch.class));