// Routes an input-read-error event back to a source: the routing answer is the
// index carried in the event itself; the destination task/input indices are unused.
// NOTE(review): assumes event.getIndex() identifies the source task expected by the
// Tez edge-manager routing contract — confirm against the EdgeManagerPlugin API.
@Override public int routeInputErrorEventToSource(InputReadErrorEvent event, int destinationTaskIndex, int destinationFailedInputIndex) { return event.getIndex(); } }
private void processEvents() throws SerDeException, IOException, InterruptedException { int eventCount = 0; while (true) { Object element = queue.take(); if (element == endOfEvents) { // we're done processing events break; } InputInitializerEvent event = (InputInitializerEvent) element; LOG.info("Input event: " + event.getTargetInputName() + ", " + event.getTargetVertexName() + ", " + (event.getUserPayload().limit() - event.getUserPayload().position())); processPayload(event.getUserPayload(), event.getSourceVertexName()); eventCount += 1; } LOG.info("Received events: " + eventCount); }
/**
 * Accepts an initializer event only if its source vertex is still being waited on;
 * events from sources we are no longer interested in are silently dropped.
 */
public void addEvent(InputInitializerEvent event) {
  final String sourceVertex = event.getSourceVertexName();
  synchronized (sourcesWaitingForEvents) {
    if (!sourcesWaitingForEvents.contains(sourceVertex)) {
      return; // source no longer of interest — drop the event
    }
    totalEventCount++;
    numEventsSeenPerSource.get(sourceVertex).increment();
    if (!queue.offer(event)) {
      throw new IllegalStateException("Queue full");
    }
    // The counters above may have satisfied this source's completion criteria.
    checkForSourceCompletion(sourceVertex);
  }
}
/**
 * Builds the event list for a vertex: one configure-tasks event followed by one
 * data-information event per input split (serialized proto payloads or raw split
 * objects, depending on {@code sendSerializedEvents}).
 */
private List<Event> createEventList(boolean sendSerializedEvents, InputSplitInfoMem inputSplitInfo) {
  int numTasks = inputSplitInfo.getNumTasks();
  // One slot per split plus the leading configure event.
  List<Event> events = Lists.newArrayListWithCapacity(numTasks + 1);
  events.add(InputConfigureVertexTasksEvent.create(numTasks,
      VertexLocationHint.create(inputSplitInfo.getTaskLocationHints()),
      InputSpecUpdate.getDefaultSinglePhysicalInputSpecUpdate()));
  int splitIndex = 0;
  if (sendSerializedEvents) {
    for (MRSplitProto mrSplit : inputSplitInfo.getSplitsProto().getSplitsList()) {
      events.add(InputDataInformationEvent.createWithSerializedPayload(
          splitIndex++, mrSplit.toByteString().asReadOnlyByteBuffer()));
    }
  } else {
    for (org.apache.hadoop.mapred.InputSplit split : inputSplitInfo.getOldFormatSplits()) {
      events.add(InputDataInformationEvent.createWithObjectPayload(splitIndex++, split));
    }
  }
  return events;
}
/**
 * Extracts the FileSplit carried by a data-information event, deserializing the
 * proto payload when no already-deserialized split object is attached.
 *
 * @throws UnsupportedOperationException if the split is not a FileSplit
 */
private FileSplit getFileSplitFromEvent(InputDataInformationEvent event) throws IOException {
  final InputSplit split;
  Object deserialized = event.getDeserializedUserPayload();
  if (deserialized != null) {
    split = (InputSplit) deserialized;
  } else {
    MRSplitProto splitProto = MRSplitProto.parseFrom(ByteString.copyFrom(event.getUserPayload()));
    split = MRInputHelpers.createOldFormatSplitFromUserPayload(
        splitProto, new SerializationFactory(new Configuration()));
  }
  if (!(split instanceof FileSplit)) {
    throw new UnsupportedOperationException(
        "Cannot handle splits other than FileSplit for the moment. Current input split type: "
            + split.getClass().getSimpleName());
  }
  return (FileSplit) split;
}
/**
 * On a successful (non-abort) close, copies the accumulated buffer into a payload
 * and ships it to the target vertex/input as an InputInitializerEvent.
 */
@Override
public void closeOp(boolean abort) throws HiveException {
  if (abort) {
    return; // nothing to send when the operator is aborted
  }
  TezContext context = (TezContext) TezContext.get();
  String vertexName = getConf().getVertexName();
  String inputName = getConf().getInputName();
  if (hasReachedMaxSize) {
    initDataBuffer(true);
  }
  // Snapshot the buffer contents; buffer.getData() may be larger than getLength().
  byte[] payload = new byte[buffer.getLength()];
  System.arraycopy(buffer.getData(), 0, payload, 0, buffer.getLength());
  Event event = InputInitializerEvent.create(vertexName, inputName,
      ByteBuffer.wrap(payload, 0, payload.length));
  if (LOG.isInfoEnabled()) {
    LOG.info("Sending Tez event to vertex = " + vertexName + ", input = " + inputName
        + ". Payload size = " + payload.length);
  }
  context.getTezProcessorContext().sendEvents(Collections.singletonList(event));
}
tezEvents.add(new TezEvent(new TaskAttemptKilledEvent(diagnostics), srcMeta == null ? updateEventMetadata : srcMeta)); } else { tezEvents.add(new TezEvent(new TaskAttemptFailedEvent(diagnostics, taskFailureType), srcMeta == null ? updateEventMetadata : srcMeta));
@Override public void heartbeat(TezHeartbeatRequest request) { List<TezEvent> inEvents = request.getEvents(); for (TezEvent tezEvent : ListUtils.emptyIfNull(inEvents)) { EventType eventType = tezEvent.getEventType(); try { switch (eventType) { case TASK_ATTEMPT_COMPLETED_EVENT: sendOrQueueEvent(ReaderEvent.doneEvent()); break; case TASK_ATTEMPT_FAILED_EVENT: TaskAttemptFailedEvent taskFailedEvent = (TaskAttemptFailedEvent) tezEvent.getEvent(); sendOrQueueEvent(ReaderEvent.errorEvent(taskFailedEvent.getDiagnostics())); break; case TASK_STATUS_UPDATE_EVENT: // If we want to handle counters break; default: LOG.warn("Unhandled event type " + eventType); break; } } catch (Exception err) { LOG.error("Error during heartbeat responder:", err); } } }
@Override public void handleEvents(List<Event> arg0) { // As of now only used for Bucket MapJoin, there is exactly one event in the list. assert arg0.size() <= 1; for (Event event : arg0) { CustomProcessorEvent cpEvent = (CustomProcessorEvent) event; ByteBuffer buffer = cpEvent.getPayload(); // Get int view of the buffer IntBuffer intBuffer = buffer.asIntBuffer(); jobConf.setInt(Constants.LLAP_NUM_BUCKETS, intBuffer.get(0)); jobConf.setInt(Constants.LLAP_BUCKET_ID, intBuffer.get(1)); } }
private void sendBucketIdsToProcessor() { for (Entry<Integer, Collection<Integer>> entry : bucketToTaskMap.asMap().entrySet()) { int bucketNum = entry.getKey(); for (Integer taskId : entry.getValue()) { // Create payload ByteBuffer buffer = ByteBuffer.allocate(8); buffer.putInt(numBuckets); buffer.putInt(bucketNum); buffer.flip(); // Create the event and send it tez. Tez will route it to appropriate processor CustomProcessorEvent cpEvent = CustomProcessorEvent.create(buffer); context.sendEventToProcessor(Collections.singletonList(cpEvent), taskId); } } }
/** * Sends out final events for task success. * @param taskAttemptID * @return * @throws IOException * indicates an RPC communication failure. * @throws TezException * indicates an exception somewhere in the AM. */ private boolean taskSucceeded(TezTaskAttemptID taskAttemptID) throws IOException, TezException { // Ensure only one final event is ever sent. if (!finalEventQueued.getAndSet(true)) { TezEvent statusUpdateEvent = new TezEvent( getStatusUpdateEvent(true, true), updateEventMetadata); TezEvent taskCompletedEvent = new TezEvent(new TaskAttemptCompletedEvent(), updateEventMetadata); if (LOG.isDebugEnabled()) { LOG.debug("Invoking OOB heartbeat for successful attempt: {}, isTaskDone={}", taskAttemptID, task.isTaskDone()); } completionListener.fragmentCompleting(fragmentRequestId, SchedulerFragmentCompletingListener.State.SUCCESS); return !heartbeat(Lists.newArrayList(statusUpdateEvent, taskCompletedEvent)).shouldDie; } else { LOG.warn("A final task state event has already been sent. Not sending again"); return askedToDie.get(); } }
private TaskStatusUpdateEvent getStatusUpdateEvent(boolean sendCounters, boolean isLast) { TezCounters counters = null; TaskStatistics stats = null; float progress = 0; if (task.hasInitialized()) { progress = task.getProgress(); // TODO HIVE-12449. Make use of progress notifications once Hive starts sending them out. // progressNotified = task.getAndClearProgressNotification(); if (sendCounters) { // send these potentially large objects at longer intervals to avoid overloading the AM counters = task.getCounters(); if (wmCounters != null && counters != null) { wmCounters.dumpToTezCounters(counters, isLast); } stats = task.getTaskStatistics(); } } return new TaskStatusUpdateEvent(counters, progress, stats, true); }
/**
 * Converts a NotTezEvent wrapper back into a routed TezEvent: the embedded proto
 * bytes become an InputDataInformationEvent, and source/destination metadata are
 * reconstructed from the wrapper's vertex and input names.
 */
public static TezEvent toTezEvent(NotTezEvent nte) throws InvalidProtocolBufferException {
  InputDataInformationEvent event = ProtoConverters.convertRootInputDataInformationEventFromProto(
      RootInputDataInformationEventProto.parseFrom(nte.getInputEventProtoBytes()));
  // Source has no real producing vertex, hence the NULL_VERTEX placeholder.
  EventMetaData sourceMetaData = new EventMetaData(
      EventMetaData.EventProducerConsumerType.INPUT, nte.getVertexName(), "NULL_VERTEX", null);
  EventMetaData destMetaData = new EventMetaData(
      EventMetaData.EventProducerConsumerType.INPUT, nte.getVertexName(), nte.getDestInputName(), null);
  TezEvent tezEvent = new TezEvent(event, sourceMetaData, System.currentTimeMillis());
  tezEvent.setDestinationInfo(destMetaData);
  return tezEvent;
} }
/**
 * Wraps an InputDataInformationEvent in a signable NotTezEvent: the returned Signable
 * records the master key id into the builder and serializes the finished proto.
 */
public static Signable createSignableNotTezEvent(
    InputDataInformationEvent event, String vertexName, String destInputName) {
  final NotTezEvent.Builder builder = NotTezEvent.newBuilder()
      .setVertexName(vertexName)
      .setDestInputName(destInputName)
      .setInputEventProtoBytes(
          ProtoConverters.convertRootInputDataInformationEventToProto(event).toByteString());
  return new Signable() {
    @Override
    public void setSignInfo(int masterKeyId) {
      builder.setKeyId(masterKeyId);
    }

    @Override
    public byte[] serialize() throws IOException {
      NotTezEvent nte = builder.build();
      // Pre-size the stream to the exact serialized proto length.
      ByteArrayOutputStream out = new ByteArrayOutputStream(nte.getSerializedSize());
      nte.writeTo(out);
      return out.toByteArray();
    }
  };
}
private void processEvents() throws SerDeException, IOException, InterruptedException { int eventCount = 0; while (true) { Object element = queue.take(); if (element == endOfEvents) { // we're done processing events break; } InputInitializerEvent event = (InputInitializerEvent) element; LOG.info("Input event: " + event.getTargetInputName() + ", " + event.getTargetVertexName() + ", " + (event.getUserPayload().limit() - event.getUserPayload().position())); processPayload(event.getUserPayload(), event.getSourceVertexName()); eventCount += 1; } LOG.info("Received events: " + eventCount); }
/**
 * Builds the event list for a vertex: one configure-tasks event followed by one
 * data-information event per input split (serialized proto payloads or raw split
 * objects, depending on {@code sendSerializedEvents}).
 */
private List<Event> createEventList(boolean sendSerializedEvents, InputSplitInfoMem inputSplitInfo) {
  int numTasks = inputSplitInfo.getNumTasks();
  // One slot per split plus the leading configure event.
  List<Event> events = Lists.newArrayListWithCapacity(numTasks + 1);
  events.add(InputConfigureVertexTasksEvent.create(numTasks,
      VertexLocationHint.create(inputSplitInfo.getTaskLocationHints()),
      InputSpecUpdate.getDefaultSinglePhysicalInputSpecUpdate()));
  int splitIndex = 0;
  if (sendSerializedEvents) {
    for (MRSplitProto mrSplit : inputSplitInfo.getSplitsProto().getSplitsList()) {
      events.add(InputDataInformationEvent.createWithSerializedPayload(
          splitIndex++, mrSplit.toByteString().asReadOnlyByteBuffer()));
    }
  } else {
    for (org.apache.hadoop.mapred.InputSplit split : inputSplitInfo.getOldFormatSplits()) {
      events.add(InputDataInformationEvent.createWithObjectPayload(splitIndex++, split));
    }
  }
  return events;
}
/**
 * Extracts the FileSplit carried by a data-information event, deserializing the
 * proto payload when no already-deserialized split object is attached.
 *
 * @throws UnsupportedOperationException if the split is not a FileSplit
 */
private FileSplit getFileSplitFromEvent(InputDataInformationEvent event) throws IOException {
  final InputSplit split;
  Object deserialized = event.getDeserializedUserPayload();
  if (deserialized != null) {
    split = (InputSplit) deserialized;
  } else {
    MRSplitProto splitProto = MRSplitProto.parseFrom(ByteString.copyFrom(event.getUserPayload()));
    split = MRInputHelpers.createOldFormatSplitFromUserPayload(
        splitProto, new SerializationFactory(new Configuration()));
  }
  if (!(split instanceof FileSplit)) {
    throw new UnsupportedOperationException(
        "Cannot handle splits other than FileSplit for the moment. Current input split type: "
            + split.getClass().getSimpleName());
  }
  return (FileSplit) split;
}
// Routes an input-read-error event back to a source: the routing answer is the
// index carried in the event itself; the destination task/input indices are unused.
// NOTE(review): assumes event.getIndex() identifies the source task expected by the
// Tez edge-manager routing contract — confirm against the EdgeManagerPlugin API.
@Override public int routeInputErrorEventToSource(InputReadErrorEvent event, int destinationTaskIndex, int destinationFailedInputIndex) { return event.getIndex(); } }
/**
 * On a successful (non-abort) close, copies the accumulated buffer into a payload
 * and ships it to the target vertex/input as an InputInitializerEvent.
 */
@Override
public void closeOp(boolean abort) throws HiveException {
  if (abort) {
    return; // nothing to send when the operator is aborted
  }
  TezContext context = (TezContext) TezContext.get();
  String vertexName = getConf().getVertexName();
  String inputName = getConf().getInputName();
  if (hasReachedMaxSize) {
    initDataBuffer(true);
  }
  // Snapshot the buffer contents; buffer.getData() may be larger than getLength().
  byte[] payload = new byte[buffer.getLength()];
  System.arraycopy(buffer.getData(), 0, payload, 0, buffer.getLength());
  Event event = InputInitializerEvent.create(vertexName, inputName,
      ByteBuffer.wrap(payload, 0, payload.length));
  if (isLogInfoEnabled) {
    LOG.info("Sending Tez event to vertex = " + vertexName + ", input = " + inputName
        + ". Payload size = " + payload.length);
  }
  context.getTezProcessorContext().sendEvents(Collections.singletonList(event));
}
/**
 * Accepts an initializer event only if its source vertex is still being waited on;
 * events from sources we are no longer interested in are silently dropped.
 */
public void addEvent(InputInitializerEvent event) {
  final String sourceVertex = event.getSourceVertexName();
  synchronized (sourcesWaitingForEvents) {
    if (!sourcesWaitingForEvents.contains(sourceVertex)) {
      return; // source no longer of interest — drop the event
    }
    totalEventCount++;
    numEventsSeenPerSource.get(sourceVertex).increment();
    if (!queue.offer(event)) {
      throw new IllegalStateException("Queue full");
    }
    // The counters above may have satisfied this source's completion criteria.
    checkForSourceCompletion(sourceVertex);
  }
}