/**
 * Assigns a fresh {@link ResultPartitionID} to every writer slot of {@code partitionIds}.
 *
 * <p>The body cannot throw a checked exception, so the original {@code throws Exception}
 * clause was spurious and has been dropped (backward-compatible: this is a private method,
 * and callers may still catch more than is declared).
 */
private void generatePartitionIds() {
    for (int writer = 0; writer < partitionIds.length; writer++) {
        partitionIds[writer] = new ResultPartitionID();
    }
}
/** Human-readable description of this result partition, used in logs and errors. */
@Override
public String toString() {
    StringBuilder description = new StringBuilder("ResultPartition ");
    description
        .append(partitionId.toString())
        .append(" [")
        .append(partitionType)
        .append(", ")
        .append(subpartitions.length)
        .append(" subpartitions, ")
        .append(pendingReferences)
        .append(" pending references]");
    return description.toString();
}
/**
 * Creates the exception signalling that the execution producing the given partition has
 * already been disposed.
 *
 * @param resultPartitionID id of the partition whose producing execution is gone
 */
public PartitionProducerDisposedException(ResultPartitionID resultPartitionID) {
    super(
        String.format(
            "Execution %s producing partition %s has already been disposed.",
            resultPartitionID.getProducerId(),
            resultPartitionID.getPartitionId()));
}
ResultPartitionID predecessorResultPartition = dataConsumptionException.get().getResultPartitionId(); Execution producer = executionGraph.getRegisteredExecutions().get(predecessorResultPartition.getProducerId()); if (producer == null) { predecessorResultPartition.getPartitionId()); if (resultPartition != null) { Execution producerVertexCurrentAttempt = resultPartition.getProducer().getCurrentExecutionAttempt(); if (producerVertexCurrentAttempt.getAttemptId().equals(predecessorResultPartition.getProducerId())) { producer = producerVertexCurrentAttempt; } else { LOG.info("Try restarting producer of {} due to DataConsumptionException", taskExecution); this.onTaskFailure(producer, new FlinkException(predecessorResultPartition.toString() + " was report error by consumer."));
/**
 * Replaces the id of every result partition of this vertex with the partition id carried by
 * the corresponding entry of {@code partitionIds} (matched by iteration order), updating the
 * intermediate result's lookup helper and rebuilding the id-keyed partition map.
 *
 * @param partitionIds new ids, one per result partition, in iteration order
 */
void resetResultPartitionID(ResultPartitionID[] partitionIds) {
    Map<IntermediateResultPartitionID, IntermediateResultPartition> remapped =
        new LinkedHashMap<>(resultPartitions.size());
    int index = 0;
    for (IntermediateResultPartition partition : resultPartitions.values()) {
        IntermediateResultPartitionID previousId = partition.getPartitionId();
        partition.setPartitionId(partitionIds[index].getPartitionId());
        // Keep the owning intermediate result's old-id -> new-id lookup in sync.
        partition.getIntermediateResult()
            .resetLookupHelper(previousId, partitionIds[index].getPartitionId());
        remapped.put(partition.getPartitionId(), partition);
        index++;
    }
    this.resultPartitions = remapped;
}
/**
 * Constructs the exception for a partition whose producing execution was already disposed.
 *
 * @param resultPartitionID id of the affected partition
 */
public PartitionProducerDisposedException(ResultPartitionID resultPartitionID) {
    super(
        String.format(
            "Execution %s producing partition %s has already been disposed.",
            resultPartitionID.getProducerId(),
            resultPartitionID.getPartitionId()));
}
getSpillRootPath(taskManagerConfiguration, jobId.toString(), partitionId.toString()), partitionId.getProducerId().toString(), partitionId.getPartitionId().toString()); this.hashMaxSubpartitions = taskManagerConfiguration.getInteger( TaskManagerOptions.TASK_MANAGER_OUTPUT_HASH_MAX_SUBPARTITIONS);
inputGate.retriggerPartitionRequest(partitionId.getPartitionId());
/**
 * Creates the exception reporting that the producer of {@code resultPartitionID} has
 * already been disposed.
 *
 * @param resultPartitionID id of the partition whose producer no longer exists
 */
public PartitionProducerDisposedException(ResultPartitionID resultPartitionID) {
    super(
        String.format(
            "Execution %s producing partition %s has already been disposed.",
            resultPartitionID.getProducerId(),
            resultPartitionID.getPartitionId()));
}
@Override public void readFrom(ByteBuf buffer) throws IOException { // TODO Directly deserialize fromNetty's buffer int length = buffer.readInt(); ByteBuffer serializedEvent = ByteBuffer.allocate(length); buffer.readBytes(serializedEvent); serializedEvent.flip(); event = (TaskEvent) EventSerializer.fromSerializedEvent(serializedEvent, getClass().getClassLoader()); partitionId = new ResultPartitionID(IntermediateResultPartitionID.fromByteBuf(buffer), ExecutionAttemptID.fromByteBuf(buffer)); receiverId = InputChannelID.fromByteBuf(buffer); } }
inputGate.retriggerPartitionRequest(partitionId.getPartitionId());
/** Renders this result partition as a short descriptive string for logging. */
@Override
public String toString() {
    return new StringBuilder("ResultPartition ")
        .append(partitionId.toString())
        .append(" [")
        .append(partitionType)
        .append(", ")
        .append(subpartitions.length)
        .append(" subpartitions, ")
        .append(pendingReferences)
        .append(" pending references]")
        .toString();
}
/**
 * Builds the exception stating that the partition's producing execution was disposed.
 *
 * @param resultPartitionID id of the partition in question
 */
public PartitionProducerDisposedException(ResultPartitionID resultPartitionID) {
    super(
        String.format(
            "Execution %s producing partition %s has already been disposed.",
            resultPartitionID.getProducerId(),
            resultPartitionID.getPartitionId()));
}
/**
 * Deserializes this partition request from the buffer: partition id, queue (subpartition)
 * index, then the receiver channel id — read in exactly that wire order.
 *
 * @param buffer source buffer positioned at the start of this message's payload
 */
@Override
public void readFrom(ByteBuf buffer) {
    IntermediateResultPartitionID intermediateId = IntermediateResultPartitionID.fromByteBuf(buffer);
    ExecutionAttemptID producerId = ExecutionAttemptID.fromByteBuf(buffer);
    partitionId = new ResultPartitionID(intermediateId, producerId);
    queueIndex = buffer.readInt();
    receiverId = InputChannelID.fromByteBuf(buffer);
}
final IntermediateResultPartitionID partitionId = icdd.getConsumedPartitionId().getPartitionId();
/** Describes this result partition (id, type, subpartition count, pending references). */
@Override
public String toString() {
    StringBuilder sb = new StringBuilder();
    sb.append("ResultPartition ").append(partitionId.toString());
    sb.append(" [").append(partitionType);
    sb.append(", ").append(subpartitions.length).append(" subpartitions, ");
    sb.append(pendingReferences).append(" pending references]");
    return sb.toString();
}
/**
 * Two {@code ResultPartitionID}s are equal iff both their partition id and producer id are
 * equal. Uses an exact-class comparison (not {@code instanceof}), so instances of subclasses
 * are never considered equal to a {@code ResultPartitionID}.
 */
@Override
public boolean equals(Object obj) {
    if (obj == null || obj.getClass() != ResultPartitionID.class) {
        return false;
    }
    ResultPartitionID that = (ResultPartitionID) obj;
    return partitionId.equals(that.getPartitionId()) && producerId.equals(that.getProducerId());
}
/**
 * Converts a relative partition directory name back into a {@link ResultPartitionID}, i.e.
 * the inverse of {@code generatePartitionRootPath()}.
 *
 * <p>The expected layout is
 * {@code <PARTITION_DIR_PREFIX><producerIdHex><SPLITTER><partitionIdHex>}: segment 0 holds
 * the producer's {@link ExecutionAttemptID}, segment 1 the
 * {@link IntermediateResultPartitionID}, both hex-encoded.
 *
 * @param relativeDir relative directory name to parse
 * @return the decoded id, or {@code null} if the name does not match the expected layout
 */
public static ResultPartitionID convertRelativeDirToResultPartitionID(String relativeDir) {
    if (!relativeDir.startsWith(PARTITION_DIR_PREFIX)) {
        return null;
    }
    String[] segments = relativeDir.substring(PARTITION_DIR_PREFIX.length()).split(SPLITTER);
    // String.split never returns null, so only the segment count needs checking.
    if (segments.length != 2) {
        return null;
    }
    try {
        return new ResultPartitionID(
            new IntermediateResultPartitionID(DatatypeConverter.parseHexBinary(segments[1])),
            new ExecutionAttemptID(DatatypeConverter.parseHexBinary(segments[0])));
    } catch (Exception e) {
        // Deliberate best-effort parsing: malformed hex or wrong-length ids mean this
        // directory is simply not a partition directory.
        return null;
    }
}
inputGate.retriggerPartitionRequest(partitionId.getPartitionId());
/** Human-readable description of this internal result partition for logs. */
@Override
public String toString() {
    StringBuilder description = new StringBuilder("InternalResultPartition ");
    description
        .append(partitionId.toString())
        .append(" [")
        .append(partitionType)
        .append(", ")
        .append(subpartitions.length)
        .append(" subpartitions, ")
        .append(pendingReferences)
        .append(" pending references]");
    return description.toString();
}