// Forwards each event to the wrapped handler while holding the mutex;
// resetting 'drained' records that a new event arrived since the last
// drain check, so a concurrent drain-wait must re-examine the queue.
@Override
public void handle(Event event) {
  synchronized (mutex) {
    actual.handle(event);
    // NOTE(review): cleared only after delegation succeeds — if
    // actual.handle throws, 'drained' keeps its previous value.
    drained = false;
  }
}
};
@Override
public void transition(JobImpl job, JobEvent event) {
  // Hand the job off to the committer for output abort, recording
  // FAILED as the terminal state of the abort.
  CommitterJobAbortEvent abortEvent = new CommitterJobAbortEvent(
      job.jobId, job.jobContext,
      org.apache.hadoop.mapreduce.JobStatus.State.FAILED);
  job.eventHandler.handle(abortEvent);
}
}
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  // Tell the owning Task that this attempt has reached commit-pending.
  TaskTAttemptEvent commitPending = new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_COMMIT_PENDING);
  taskAttempt.eventHandler.handle(commitPending);
}
}
@Override public void handle(Event event) { // Use hashCode (of ApplicationId) to dispatch the event to the child // dispatcher, such that all the writing events of one application will // be handled by one thread, the scheduled order of the these events // will be preserved int index = (event.hashCode() & Integer.MAX_VALUE) % dispatchers.size(); dispatchers.get(index).getEventHandler().handle(event); }
@Override
protected void expire(NodeId id) {
  // Liveness monitor fired for this node: raise an EXPIRE event so the
  // RM node state machine can react.
  RMNodeEvent expireEvent = new RMNodeEvent(id, RMNodeEventType.EXPIRE);
  dispatcher.handle(expireEvent);
}
}
@Override public void handle(Event event) { // Use hashCode (of ApplicationId) to dispatch the event to the child // dispatcher, such that all the writing events of one application will // be handled by one thread, the scheduled order of the these events // will be preserved int index = (event.hashCode() & Integer.MAX_VALUE) % dispatchers.size(); dispatchers.get(index).getEventHandler().handle(event); } }
// Schedules every task in the set: tasks with recorded output from a
// previous run are recovered, all others are scheduled fresh.
protected void scheduleTasks(Set<TaskId> taskIDs,
    boolean recoverTaskOutput) {
  for (TaskId tid : taskIDs) {
    // remove() doubles as the lookup so each prior-run record is
    // consumed exactly once.
    TaskInfo priorRun = completedTasksFromPreviousRun.remove(tid);
    if (priorRun == null) {
      eventHandler.handle(new TaskEvent(tid, TaskEventType.T_SCHEDULE));
      continue;
    }
    eventHandler.handle(
        new TaskRecoverEvent(tid, priorRun, committer, recoverTaskOutput));
  }
}
@SuppressWarnings("unchecked")
@Override
public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) {
  // Notify the parent Task that this attempt is now commit-pending.
  TaskTAttemptEvent pendingEvent = new TaskTAttemptEvent(
      taskAttempt.attemptId, TaskEventType.T_ATTEMPT_COMMIT_PENDING);
  taskAttempt.eventHandler.handle(pendingEvent);
}
}
@Override public void handleEvent(HistoryEvent event) throws IOException { //Skip over the AM Events this is handled elsewhere if (!(event instanceof AMStartedEvent)) { handler.handle(new JobHistoryEvent(jobId, event)); } }
// Delivers the pre-built event 'toSend' to the job's event handler on
// this runnable's thread, logging the delivery first.
public void run() {
  LOG.info("Sending event " + toSend + " to " + job.getID());
  job.getEventHandler().handle(toSend);
}
}
@Override
public void transition(JobImpl job, JobEvent event) {
  // Stamp the finish time, then ask the committer to abort the job's
  // outputs with KILLED as the terminal state.
  job.setFinishTime();
  CommitterJobAbortEvent abortEvent = new CommitterJobAbortEvent(
      job.jobId, job.jobContext,
      org.apache.hadoop.mapreduce.JobStatus.State.KILLED);
  job.eventHandler.handle(abortEvent);
}
}
@Override public void transition(ContainerImpl container, ContainerEvent event) { // Container was PAUSED so tell the scheduler container.dispatcher.getEventHandler().handle( new ContainerSchedulerEvent(container, ContainerSchedulerEventType.CONTAINER_PAUSED)); } }
@Override @SuppressWarnings("unchecked") // dispatcher not typed public void run() { dispatcher.getEventHandler().handle( new LocalizationEvent(LocalizationEventType.CACHE_CLEANUP)); }
@Override
protected void handleTimeOut(TezTaskAttemptID attemptId) {
  // Heartbeat lease elapsed: fail the attempt as non-fatal with a
  // TA_TIMED_OUT event. The diagnostic text is kept byte-identical to
  // the established log/message format.
  String diagnostics = "AttemptID:" + attemptId.toString()
      + " Timed out after " + timeOut / 1000 + " secs";
  eventHandler.handle(new TaskAttemptEventAttemptFailed(attemptId,
      TaskAttemptEventType.TA_TIMED_OUT, TaskFailureType.NON_FATAL,
      diagnostics, TaskAttemptTerminationCause.TASK_HEARTBEAT_ERROR));
}
}
@Override
public void setQueueName(String queueName) {
  this.queueName = queueName;
  // Record the queue change in the job history stream.
  eventHandler.handle(new JobHistoryEvent(jobId,
      new JobQueueChangeEvent(oldJobId, queueName)));
}
@Override
public void transition(JobImpl job, JobEvent event) {
  // Record the finish time before handing the job to the committer for
  // an output abort in the KILLED terminal state.
  job.setFinishTime();
  CommitterJobAbortEvent killAbort = new CommitterJobAbortEvent(
      job.jobId, job.jobContext,
      org.apache.hadoop.mapreduce.JobStatus.State.KILLED);
  job.eventHandler.handle(killAbort);
}
}
@SuppressWarnings({ "unchecked" })
// Bumps the TASKS_REQ_PREEMPT job counter, at most once per attempt;
// 'countedPreemptions' tracks which attempts have been counted.
private void updatePreemptionCounters(TaskAttemptId yarnAttemptID) {
  if (countedPreemptions.contains(yarnAttemptID)) {
    return; // already counted this attempt
  }
  countedPreemptions.add(yarnAttemptID);
  JobCounterUpdateEvent counterUpdate = new JobCounterUpdateEvent(
      yarnAttemptID.getTaskId().getJobId());
  counterUpdate.addCounterUpdate(JobCounter.TASKS_REQ_PREEMPT, 1);
  eventHandler.handle(counterUpdate);
}
// Reports an uncaught exception from task event handling: first pushes
// a diagnostics update to the owning DAG, then raises INTERNAL_ERROR.
protected void internalErrorUncaughtException(TaskEventType type,
    Exception e) {
  String diagnostics = "Uncaught exception when handling event " + type
      + " on Task " + this.taskId + ", error=" + e.getMessage();
  eventHandler.handle(new DAGEventDiagnosticsUpdate(
      this.taskId.getVertexID().getDAGId(), diagnostics));
  eventHandler.handle(new DAGEvent(this.taskId.getVertexID().getDAGId(),
      DAGEventType.INTERNAL_ERROR));
}
// Stamps the job's finish time and emits a JobFinishedEvent into the
// job history stream.
void logJobHistoryFinishedEvent() {
  this.setFinishTime();
  JobFinishedEvent finishedEvent = createJobFinishedEvent(this);
  LOG.info("Calling handler for JobFinishedEvent ");
  this.getEventHandler().handle(
      new JobHistoryEvent(this.jobId, finishedEvent));
}
protected JobStateInternal checkReadyForCommit() { JobStateInternal currentState = getInternalState(); if (completedTaskCount == tasks.size() && currentState == JobStateInternal.RUNNING) { eventHandler.handle(new CommitterJobCommitEvent(jobId, getJobContext())); return JobStateInternal.COMMITTING; } // return the current state as job not ready to commit yet return getInternalState(); }