/**
 * If this fragment never started, logs any cancel request and any early receiver
 * terminations that were queued up for it. No-op once the fragment has started.
 */
void checkStateAndLogIfNecessary() {
  if (fragmentStarted) {
    return;
  }
  final DateTimeFormatter formatter = ISODateTimeFormat.dateTime();
  if (isCancelled()) {
    logger.warn("Received cancel request at {} for fragment {} that was never started",
        formatter.print(cancellationTime),
        QueryIdHelper.getQueryIdentifier(handle));
  }
  // Drain every buffered early-termination event and report it.
  for (FragmentEvent event = finishedReceivers.poll(); event != null; event = finishedReceivers.poll()) {
    logger.warn("Received early fragment termination at {} for path {} {} -> {} for a fragment that was never started",
        formatter.print(event.time),
        QueryIdHelper.getQueryId(handle.getQueryId()),
        QueryIdHelper.getFragmentId(event.handle),
        QueryIdHelper.getFragmentId(handle));
  }
}
/**
 * Builds the executor thread name for a fragment.
 *
 * @param fragmentHandle handle identifying the fragment
 * @return name of the form {@code <queryId>:frag:<majorId>:<minorId>}
 */
public static String getExecutorThreadName(final FragmentHandle fragmentHandle) {
  return getQueryId(fragmentHandle.getQueryId())
      + ":frag:"
      + fragmentHandle.getMajorFragmentId()
      + ":"
      + fragmentHandle.getMinorFragmentId();
}
/**
 * Creates the spill manager, spill file and output stream used to spool batches
 * for this exchange. Any failure is rethrown unchecked.
 */
private void setupOutputStream() {
  try {
    final String qid = QueryIdHelper.getQueryId(handle.getQueryId());
    final int majorFragmentId = handle.getMajorFragmentId();
    final int minorFragmentId = handle.getMinorFragmentId();
    // Unique spill id per (query, major, minor, opposite sender, buffer) tuple.
    final String id = String.format("spool-%s.%s.%s.%s.%s", qid, majorFragmentId, minorFragmentId, oppositeId, bufferIndex);
    this.spillManager = new SpillManager(config, null, id, SPOOLING_CONFIG, spillService, "spooling sorted exchange");
    this.spillFile = spillManager.getSpillFile("batches");
    outputStream = spillFile.create();
  } catch (Exception ex) {
    // Throwables.propagate is deprecated; this is its documented replacement and
    // preserves its behavior (rethrow unchecked as-is, wrap checked in RuntimeException).
    Throwables.throwIfUnchecked(ex);
    throw new RuntimeException(ex);
  }
}
@Override public void noMoreToConsume() { state = State.DONE; // make sure we send a schema batch. if(batchesSent == 0){ WritableBatch writable = WritableBatch.getBatchNoHVWrap(0, incoming, false); final QueryData header = QueryData.newBuilder() .setQueryId(context.getFragmentHandle().getQueryId()) .setRowCount(0) .setDef(writable.getDef()) .build(); writable.close(); final QueryWritableBatch batch = new QueryWritableBatch(header); stats.startWait(); try { execToCoord.sendData(batch); } finally { stats.stopWait(); } } }
@Override
public void consumeData(int records) {
  Preconditions.checkArgument(records > 0);
  final int sendingMinorId = handle.getMinorFragmentId();
  final int receivingMinorId = oppositeHandle.getMinorFragmentId();
  // Wrap the incoming vectors into a batch addressed to the opposite minor fragment.
  final FragmentWritableBatch batch = FragmentWritableBatch.create(
      handle.getQueryId(), handle.getMajorFragmentId(), sendingMinorId, recMajor, incoming, receivingMinorId);
  updateStats(batch);
  // Time spent in sendRecordBatch is accounted as wait time.
  context.getStats().startWait();
  try {
    tunnel.sendRecordBatch(batch);
  } finally {
    context.getStats().stopWait();
  }
}
/** * Builds and starts a new query, if sufficient resources are available. * In case resources are not available immediately, the query will be started later, when resources become available */ public void buildAndStartQuery(final PlanFragment firstFragment, final SchedulingInfo schedulingInfo, final QueryStarter queryStarter) { final QueryId queryId = firstFragment.getHandle().getQueryId(); // Note: The temporary reference count (released in the finally clause, below) is necessary to guard against races // between potential workload ticket modifications and this function (creation of fragments for queries on the workload) WorkloadTicket workloadTicket = workloadTicketDepot.getWorkloadTicket(schedulingInfo); try { final long queryMaxAllocation = workloadTicket.getChildMaxAllocation(firstFragment.getContext().getQueryMaxAllocation()); workloadTicket.buildAndStartQuery(queryId, queryMaxAllocation, firstFragment.getForeman(), firstFragment.getAssignment(), tunnelCreator, queryStarter); } finally { workloadTicket.release(); } }
// Generated protobuf builder method — do not hand-edit; regenerate from the .proto instead.
// Merges every field that is set on {@code other} into this builder: message fields
// (queryId, parentQueryId) are merged recursively, scalar fields (major/minor fragment ids)
// are overwritten, and unknown fields are carried over.
public Builder mergeFrom(com.dremio.exec.proto.ExecProtos.FragmentHandle other) { if (other == com.dremio.exec.proto.ExecProtos.FragmentHandle.getDefaultInstance()) return this; if (other.hasQueryId()) { mergeQueryId(other.getQueryId()); } if (other.hasMajorFragmentId()) { setMajorFragmentId(other.getMajorFragmentId()); } if (other.hasMinorFragmentId()) { setMinorFragmentId(other.getMinorFragmentId()); } if (other.hasParentQueryId()) { mergeParentQueryId(other.getParentQueryId()); } this.mergeUnknownFields(other.getUnknownFields()); return this; }
/**
 * Converts a running fragment executor into a {@link FragmentInfo} row.
 * When no status/profile is available yet, memory, row count and start time default to zero.
 */
@Override
public FragmentInfo apply(final FragmentExecutor fragmentExecutor) {
  final FragmentStatus status = fragmentExecutor.getStatus();
  final ExecProtos.FragmentHandle handle = fragmentExecutor.getHandle();
  final MinorFragmentProfile profile = status == null ? null : status.getProfile();
  // Use primitive longs: the original boxed Long locals only forced pointless autoboxing.
  final long memoryUsed = profile == null ? 0 : profile.getMemoryUsed();
  final long rowsProcessed = profile == null ? 0 : getRowsProcessed(profile);
  final Timestamp startTime = profile == null ? new Timestamp(0) : new Timestamp(profile.getStartTime());
  return new FragmentInfo(dbContext.get().getEndpoint().getAddress(),
      QueryIdHelper.getQueryId(handle.getQueryId()),
      handle.getMajorFragmentId(),
      handle.getMinorFragmentId(),
      memoryUsed,
      rowsProcessed,
      startTime,
      fragmentExecutor.getBlockingStatus(),
      fragmentExecutor.getTaskDescriptor());
}
private void sendTermination() { final ExecProtos.FragmentHandle handle = context.getFragmentHandle(); stats.startWait(); for (MinorFragmentEndpoint destination : config.getDestinations()) { // don't send termination message if the receiver fragment is already terminated. if (remainingReceivers.get(destination.getId()) == 0) { ExecRPC.FragmentStreamComplete completion = ExecRPC.FragmentStreamComplete.newBuilder() .setQueryId(handle.getQueryId()) .setSendingMajorFragmentId(handle.getMajorFragmentId()) .setSendingMinorFragmentId(handle.getMinorFragmentId()) .setReceivingMajorFragmentId(config.getOppositeMajorFragmentId()) .addReceivingMinorFragmentId(destination.getId()) .build(); tunnelProvider.getExecTunnel(destination.getEndpoint()).sendStreamComplete(completion); } } stats.stopWait(); }
@Override
public void noMoreToConsume() {
  // Announce stream completion to each receiver group, then mark this sender done.
  final int tunnelCount = tunnels.size();
  for (int idx = 0; idx < tunnelCount; idx++) {
    final FragmentStreamComplete completion = FragmentStreamComplete.newBuilder()
        .setQueryId(handle.getQueryId())
        .setSendingMajorFragmentId(handle.getMajorFragmentId())
        .setSendingMinorFragmentId(handle.getMinorFragmentId())
        .setReceivingMajorFragmentId(config.getOppositeMajorFragmentId())
        .addAllReceivingMinorFragmentId(minorFragments.get(idx))
        .build();
    tunnels.get(idx).sendStreamComplete(completion);
  }
  state = State.DONE;
}
@Override
public void noMoreToConsume() {
  // Send a stream-completion message down every tunnel, then mark this sender done.
  for (int idx = 0; idx < tunnels.length; idx++) {
    final FragmentStreamComplete message = FragmentStreamComplete.newBuilder()
        .setQueryId(handle.getQueryId())
        .setSendingMajorFragmentId(handle.getMajorFragmentId())
        .setSendingMinorFragmentId(handle.getMinorFragmentId())
        .setReceivingMajorFragmentId(config.getOppositeMajorFragmentId())
        .addAllReceivingMinorFragmentId(Ints.asList(receivingMinorFragments[idx]))
        .build();
    tunnels[idx].sendStreamComplete(message);
  }
  state = State.DONE;
}
/**
 * Returns a human-readable identifier of the form
 * {@code <queryId>:<majorFragmentId>:<minorFragmentId>}.
 */
public static String getQueryIdentifier(final FragmentHandle h) {
  return String.format("%s:%s:%s", getQueryId(h.getQueryId()), h.getMajorFragmentId(), h.getMinorFragmentId());
}
/**
 * Notifies the upstream sender (once) that this receiver is finished, so it can
 * stop producing data for it.
 */
public void informUpstreamIfNecessary() {
  if (done) {
    return;
  }
  // Identify the sending fragment on the opposite side of this exchange.
  final FragmentHandle sender = FragmentHandle.newBuilder()
      .setQueryId(handle.getQueryId())
      .setMajorFragmentId(config.getOppositeMajorFragmentId())
      .setMinorFragmentId(sendingMinorFragmentId)
      .build();
  final FinishedReceiver message = FinishedReceiver.newBuilder()
      .setReceiver(handle)
      .setSender(sender)
      .build();
  tunnelProvider.getExecTunnel(sendingNode).informReceiverFinished(message);
  done = true;
}
// Generated protostuff serializer — do not hand-edit; regenerate from the schema instead.
// Writes each set field of the FragmentHandle to the output using its protobuf field number:
// 1=queryId (message), 2=majorFragmentId (int32), 3=minorFragmentId (int32), 4=parentQueryId (message).
// NOTE(review): the isInitialized declaration at the end continues beyond this view.
public void writeTo(io.protostuff.Output output, com.dremio.exec.proto.ExecProtos.FragmentHandle message) throws java.io.IOException { if(message.hasQueryId()) output.writeObject(1, message.getQueryId(), com.dremio.exec.proto.SchemaUserBitShared.QueryId.WRITE, false); if(message.hasMajorFragmentId()) output.writeInt32(2, message.getMajorFragmentId(), false); if(message.hasMinorFragmentId()) output.writeInt32(3, message.getMinorFragmentId(), false); if(message.hasParentQueryId()) output.writeObject(4, message.getParentQueryId(), com.dremio.exec.proto.SchemaUserBitShared.QueryId.WRITE, false); } public boolean isInitialized(com.dremio.exec.proto.ExecProtos.FragmentHandle message)
/**
 * Forwards a fragment status update to the attempt manager, but only when the status
 * belongs to the query that manager is running.
 */
public void updateStatus(FragmentStatus status) {
  // Read the field into a local so the null check and the use see the same reference.
  final AttemptManager manager = attemptManager;
  if (manager == null) {
    return;
  }
  if (manager.getQueryId().equals(status.getHandle().getQueryId())) {
    manager.updateStatus(status);
  }
}
/**
 * Builds the file name used for a query fragment's output at the given location.
 *
 * @param handle fragment handle supplying query/major/minor ids
 * @param location base directory for the file
 * @param tag suffix distinguishing multiple files for the same fragment
 * @return path of the form {@code <location>//<queryId>_<major>_<minor>_<tag>}
 */
public static String getFileNameForQueryFragment(FragmentHandle handle, String location, String tag) {
  // NOTE(review): the separator below is a literal double slash ("//"); most filesystems
  // collapse it, but confirm it is intentional before normalizing.
  return String.format("%s//%s_%s_%s_%s",
      location,
      QueryIdHelper.getQueryId(handle.getQueryId()),
      handle.getMajorFragmentId(),
      handle.getMinorFragmentId(),
      tag);
}
@Override public void fragmentStatusUpdate(FragmentStatus status) throws RpcException { ExternalId id = ExternalIdHelper.toExternal(status.getHandle().getQueryId()); ManagedForeman managed = externalIdToForeman.get(id); if (managed == null) { // TODO(DX-7242): this is a little chatty since a failed query will often log a bunch of fragments // We need a better mechanism to debug this. logger.info("A fragment status message arrived post query termination, dropping. Fragment [{}] reported a state of {}.", QueryIdHelper.getFragmentId(status.getHandle()), status.getProfile().getState()); } else { managed.foreman.updateStatus(status); } }
/**
 * Announces stream completion to the single opposite receiver, then starts
 * dropping any further incoming batches.
 */
public void sendTermination() {
  final FragmentHandle handle = context.getFragmentHandle();
  final FragmentStreamComplete.Builder completion = FragmentStreamComplete.newBuilder();
  completion.setQueryId(handle.getQueryId());
  completion.setSendingMajorFragmentId(handle.getMajorFragmentId());
  completion.setSendingMinorFragmentId(handle.getMinorFragmentId());
  completion.setReceivingMajorFragmentId(operator.getOppositeMajorFragmentId());
  completion.addReceivingMinorFragmentId(oppositeMinorFragmentId);
  tunnel.sendStreamComplete(completion.build());
  dropAll = true;
}
@Override
public void noMoreToConsume() {
  // Single opposite receiver: announce completion using both fragment handles.
  tunnel.sendStreamComplete(FragmentStreamComplete.newBuilder()
      .setQueryId(handle.getQueryId())
      .setSendingMajorFragmentId(handle.getMajorFragmentId())
      .setSendingMinorFragmentId(handle.getMinorFragmentId())
      .setReceivingMajorFragmentId(oppositeHandle.getMajorFragmentId())
      .addReceivingMinorFragmentId(oppositeHandle.getMinorFragmentId())
      .build());
}
/**
 * Creates a materializer over the given incoming vectors, capturing the query id
 * and allocator from the operator context. The incoming batch must have a schema.
 */
public VectorRecordMaterializer(OperatorContext context, VectorAccessible incoming) {
  assert incoming.getSchema() != null : "Schema must be defined.";
  this.incoming = incoming;
  this.queryId = context.getFragmentHandle().getQueryId();
  this.allocator = context.getAllocator();
}