/**
 * Assigns the given partition to slot {@code partitionNumber} of this result and
 * records its ID in the lookup helper. Each slot may be assigned exactly once.
 *
 * @param partitionNumber index of the producing subtask, in [0, numParallelProducers)
 * @param partition the partition to register, must not be null
 * @throws IllegalArgumentException if the partition is null or the number is out of range
 * @throws IllegalStateException if the slot has already been assigned
 */
public void setPartition(int partitionNumber, IntermediateResultPartition partition) {
	// Separate checks with messages: the original threw a bare IllegalArgumentException,
	// which gives no hint whether the partition was null or the index was out of range.
	if (partition == null) {
		throw new IllegalArgumentException("partition must not be null");
	}
	if (partitionNumber < 0 || partitionNumber >= numParallelProducers) {
		throw new IllegalArgumentException(
			"partitionNumber " + partitionNumber + " out of range [0, " + numParallelProducers + ")");
	}
	if (partitions[partitionNumber] != null) {
		throw new IllegalStateException("Partition #" + partitionNumber + " has already been assigned.");
	}
	partitions[partitionNumber] = partition;
	partitionLookupHelper.put(partition.getPartitionId(), partitionNumber);
	partitionsAssigned++;
}
/**
 * Registers {@code partition} in slot {@code partitionNumber}.
 * Rejects null partitions, out-of-range slot numbers, and double assignment.
 */
public void setPartition(int partitionNumber, IntermediateResultPartition partition) {
	final boolean argumentsValid =
		partition != null && partitionNumber >= 0 && partitionNumber < numParallelProducers;
	if (!argumentsValid) {
		throw new IllegalArgumentException();
	}
	if (partitions[partitionNumber] == null) {
		partitions[partitionNumber] = partition;
		partitionLookupHelper.put(partition.getPartitionId(), partitionNumber);
		partitionsAssigned++;
	} else {
		throw new IllegalStateException("Partition #" + partitionNumber + " has already been assigned.");
	}
}
/** Assigns a produced partition to its slot; each slot may be filled exactly once. */
public void setPartition(int partitionNumber, IntermediateResultPartition partition) {
	final boolean inRange = partitionNumber >= 0 && partitionNumber < numParallelProducers;
	if (partition == null || !inRange) {
		throw new IllegalArgumentException();
	}

	final IntermediateResultPartition existing = partitions[partitionNumber];
	if (existing != null) {
		throw new IllegalStateException("Partition #" + partitionNumber + " has already been assigned.");
	}

	partitions[partitionNumber] = partition;
	partitionLookupHelper.put(partition.getPartitionId(), partitionNumber);
	partitionsAssigned++;
}
/**
 * Stores the given partition at index {@code partitionNumber} and records its ID
 * in the lookup helper. Exactly one assignment per index is permitted.
 */
public void setPartition(int partitionNumber, IntermediateResultPartition partition) {
	if (partitionNumber < 0 || partitionNumber >= numParallelProducers || partition == null) {
		throw new IllegalArgumentException();
	}
	if (partitions[partitionNumber] != null) {
		throw new IllegalStateException("Partition #" + partitionNumber + " has already been assigned.");
	}
	// These three updates are independent of each other; together they complete the assignment.
	partitionsAssigned++;
	partitionLookupHelper.put(partition.getPartitionId(), partitionNumber);
	partitions[partitionNumber] = partition;
}
/**
 * Re-keys all produced result partitions with the IDs given in {@code partitionIds}
 * (in the map's iteration order), updating each owning IntermediateResult's lookup
 * helper and rebuilding the local partition map under the new keys.
 *
 * @param partitionIds the replacement partition IDs, exactly one per registered partition
 * @throws IllegalArgumentException if the array is null or its length does not match the
 *         number of currently registered partitions (the original code would have thrown
 *         a bare ArrayIndexOutOfBoundsException, or silently ignored surplus IDs)
 */
void resetResultPartitionID(ResultPartitionID[] partitionIds) {
	if (partitionIds == null || partitionIds.length != resultPartitions.size()) {
		throw new IllegalArgumentException(
			"Expected exactly " + resultPartitions.size() + " partition IDs, got "
				+ (partitionIds == null ? "null" : partitionIds.length));
	}

	Map<IntermediateResultPartitionID, IntermediateResultPartition> newResultPartitions =
		new LinkedHashMap<>(resultPartitions.size());

	int i = 0;
	for (IntermediateResultPartition resultPartition : resultPartitions.values()) {
		IntermediateResultPartitionID originId = resultPartition.getPartitionId();
		resultPartition.setPartitionId(partitionIds[i].getPartitionId());
		// Keep the owning result's originId -> newId mapping in sync.
		resultPartition.getIntermediateResult().resetLookupHelper(originId, partitionIds[i].getPartitionId());
		newResultPartitions.put(resultPartition.getPartitionId(), resultPartition);
		i++;
	}

	// Swap in the rebuilt map so lookups use the new partition IDs as keys.
	this.resultPartitions = newResultPartitions;
}
public static ResultPartitionDeploymentDescriptor from( IntermediateResultPartition partition, int maxParallelism, boolean lazyScheduling) { final IntermediateDataSetID resultId = partition.getIntermediateResult().getId(); final IntermediateResultPartitionID partitionId = partition.getPartitionId(); final ResultPartitionType partitionType = partition.getIntermediateResult().getResultType(); // The produced data is partitioned among a number of subpartitions. // // If no consumers are known at this point, we use a single subpartition, otherwise we have // one for each consuming sub task. int numberOfSubpartitions = 1; if (!partition.getConsumers().isEmpty() && !partition.getConsumers().get(0).isEmpty()) { if (partition.getConsumers().size() > 1) { throw new IllegalStateException("Currently, only a single consumer group per partition is supported."); } numberOfSubpartitions = partition.getConsumers().get(0).size(); } return new ResultPartitionDeploymentDescriptor( resultId, partitionId, partitionType, numberOfSubpartitions, maxParallelism, lazyScheduling); } }
public static ResultPartitionDeploymentDescriptor from( IntermediateResultPartition partition, int maxParallelism, boolean lazyScheduling) { final IntermediateDataSetID resultId = partition.getIntermediateResult().getId(); final IntermediateResultPartitionID partitionId = partition.getPartitionId(); final ResultPartitionType partitionType = partition.getIntermediateResult().getResultType(); // The produced data is partitioned among a number of subpartitions. // // If no consumers are known at this point, we use a single subpartition, otherwise we have // one for each consuming sub task. int numberOfSubpartitions = 1; if (!partition.getConsumers().isEmpty() && !partition.getConsumers().get(0).isEmpty()) { if (partition.getConsumers().size() > 1) { throw new IllegalStateException("Currently, only a single consumer group per partition is supported."); } numberOfSubpartitions = partition.getConsumers().get(0).size(); } return new ResultPartitionDeploymentDescriptor( resultId, partitionId, partitionType, numberOfSubpartitions, maxParallelism, lazyScheduling); } }
public static ResultPartitionDeploymentDescriptor from( IntermediateResultPartition partition, int maxParallelism, boolean lazyScheduling) { final IntermediateDataSetID resultId = partition.getIntermediateResult().getId(); final IntermediateResultPartitionID partitionId = partition.getPartitionId(); final ResultPartitionType partitionType = partition.getIntermediateResult().getResultType(); // The produced data is partitioned among a number of subpartitions. // // If no consumers are known at this point, we use a single subpartition, otherwise we have // one for each consuming sub task. int numberOfSubpartitions = 1; if (!partition.getConsumers().isEmpty() && !partition.getConsumers().get(0).isEmpty()) { if (partition.getConsumers().size() > 1) { throw new IllegalStateException("Currently, only a single consumer group per partition is supported."); } numberOfSubpartitions = partition.getConsumers().get(0).size(); } return new ResultPartitionDeploymentDescriptor( resultId, partitionId, partitionType, numberOfSubpartitions, maxParallelism, lazyScheduling); } }
public static ResultPartitionDeploymentDescriptor from( IntermediateResultPartition partition, int maxParallelism, boolean lazyScheduling) { final IntermediateDataSetID resultId = partition.getIntermediateResult().getId(); final IntermediateResultPartitionID partitionId = partition.getPartitionId(); final ResultPartitionType partitionType = partition.getIntermediateResult().getResultType(); // The produced data is partitioned among a number of subpartitions. // // If no consumers are known at this point, we use a single subpartition, otherwise we have // one for each consuming sub task. int numberOfSubpartitions = 1; if (!partition.getConsumers().isEmpty() && !partition.getConsumers().get(0).isEmpty()) { if (partition.getConsumers().size() > 1) { throw new IllegalStateException("Currently, only a single consumer group per partition is supported."); } numberOfSubpartitions = partition.getConsumers().get(0).size(); } return new ResultPartitionDeploymentDescriptor( resultId, partitionId, partitionType, numberOfSubpartitions, maxParallelism, lazyScheduling); } }
/**
 * Creates a partial input channel descriptor for the given partition and its
 * producing execution; consumer-side details are resolved later.
 */
public static PartialInputChannelDeploymentDescriptor fromEdge(
		IntermediateResultPartition partition, Execution producer) {

	final IntermediateResult result = partition.getIntermediateResult();
	final ResultPartitionID partitionId =
		new ResultPartitionID(partition.getPartitionId(), producer.getAttemptId());
	final TaskManagerLocation producerLocation = producer.getAssignedResourceLocation();

	return new PartialInputChannelDeploymentDescriptor(result, partitionId, producerLocation);
}
}
/**
 * Creates a partial input channel descriptor for the given partition and its producing
 * execution. The consumer-side information is filled in later, once the consumer is deployed.
 */
public static PartialInputChannelDeploymentDescriptor fromEdge(
		IntermediateResultPartition partition, Execution producer) {

	final IntermediateResult result = partition.getIntermediateResult();

	return new PartialInputChannelDeploymentDescriptor(
		result.getId(),
		new ResultPartitionID(partition.getPartitionId(), producer.getAttemptId()),
		producer.getAssignedResourceLocation(),
		result.getConnectionIndex());
}
}
/**
 * Builds a partial input channel deployment descriptor from a produced partition
 * and the execution attempt that produces it.
 */
public static PartialInputChannelDeploymentDescriptor fromEdge(
		IntermediateResultPartition partition, Execution producer) {

	final IntermediateResult result = partition.getIntermediateResult();
	final IntermediateDataSetID dataSetId = result.getId();
	final int connectionIndex = result.getConnectionIndex();

	final ResultPartitionID producedPartitionId =
		new ResultPartitionID(partition.getPartitionId(), producer.getAttemptId());
	final TaskManagerLocation producerLocation = producer.getAssignedResourceLocation();

	return new PartialInputChannelDeploymentDescriptor(
		dataSetId, producedPartitionId, producerLocation, connectionIndex);
}
}
/**
 * Creates the partial (producer-side only) input channel descriptor for the given
 * partition and producing task.
 */
public static PartialInputChannelDeploymentDescriptor fromEdge(
		IntermediateResultPartition partition, Execution producer) {

	final ResultPartitionID id =
		new ResultPartitionID(partition.getPartitionId(), producer.getAttemptId());
	final IntermediateResult intermediateResult = partition.getIntermediateResult();
	final TaskManagerLocation location = producer.getAssignedResourceLocation();

	return new PartialInputChannelDeploymentDescriptor(
		intermediateResult.getId(), id, location, intermediateResult.getConnectionIndex());
}
}
// Register the produced partition both on the owning IntermediateResult (by subtask index)
// and in this vertex's local lookup map (by partition ID).
// NOTE(review): these statements belong to an enclosing method not visible in this chunk.
result.setPartition(subTaskIndex, irp); resultPartitions.put(irp.getPartitionId(), irp);
result.setPartition(subTaskIndex, irp); resultPartitions.put(irp.getPartitionId(), irp);
result.setPartition(subTaskIndex, irp); resultPartitions.put(irp.getPartitionId(), irp);
result.setPartition(subTaskIndex, irp); resultPartitions.put(irp.getPartitionId(), irp);
/**
 * Recovers the pipelined result partition consume status after a job master failover.
 *
 * <p>Looks up the produced partition belonging to {@code resultId}, validates that the
 * current execution is still RUNNING and that the partition is pipelined, then completes
 * the task manager location future and re-notifies the consumers.
 *
 * @param resultId the intermediate data set ID whose partition should be recovered
 * @param location the task manager location the producing task is running on
 * @throws FlinkRuntimeException if no matching partition exists, the execution is not
 *         RUNNING, or the partition is not pipelined
 */
public void recoverResultPartitionStatus(
		IntermediateDataSetID resultId, TaskManagerLocation location) {

	IntermediateResultPartition partitionToRecover = null;
	for (IntermediateResultPartition irp : getProducedPartitions().values()) {
		if (irp.getIntermediateResult().getId().equals(resultId)) {
			partitionToRecover = irp;
			// A vertex produces at most one partition per intermediate result,
			// so stop at the first match (the original kept scanning needlessly).
			break;
		}
	}

	if (partitionToRecover == null) {
		throw new FlinkRuntimeException("Can not find the intermediate result "
			+ resultId + " on " + getTaskNameWithSubtaskIndex());
	}

	// Split the combined state check so the failure message names the actual problem
	// (the original reported a single vague "Invalid state" for both conditions).
	if (!ExecutionState.RUNNING.equals(currentExecution.getState())) {
		throw new FlinkRuntimeException("Invalid state " + currentExecution.getState()
			+ " for " + getTaskNameWithSubtaskIndex() + ", expected RUNNING.");
	}
	if (!partitionToRecover.getResultType().isPipelined()) {
		throw new FlinkRuntimeException("Invalid state for " + getTaskNameWithSubtaskIndex()
			+ ": result partition of " + resultId + " is not pipelined.");
	}

	currentExecution.getTaskManagerLocationFuture().complete(location);
	scheduleOrUpdateConsumers(
		new ResultPartitionID(partitionToRecover.getPartitionId(), currentExecution.getAttemptId()));
}
consumedPartition.getPartitionId(), producer.getAttemptId());
consumedPartition.getPartitionId(), producer.getAttemptId());