@Override
public Return accept(PartitionDetail partitionDetail) {
  // Skip any partition whose "s" key field differs from the single allowed value.
  String sField = (String) partitionDetail.getPartitionKey().getField("s");
  if (!allowedSField.equals(sField)) {
    return Return.SKIP;
  }
  // Stop iterating entirely once the "i" key field hits the configured stop marker
  // (stopOnI may be null, meaning "never stop early").
  int iField = (int) partitionDetail.getPartitionKey().getField("i");
  boolean hitStopMarker = stopOnI != null && stopOnI.equals(iField);
  return hitStopMarker ? Return.STOP : Return.ACCEPT;
}
}
@Nullable
@Override
public TimePartitionDetail getPartitionByTime(long time) {
  // Translate the timestamp into a partition key and look it up; absent partitions yield null.
  PartitionDetail detail = getPartition(partitionKeyForTime(time));
  if (detail == null) {
    return null;
  }
  // Wrap the generic partition detail in its time-aware counterpart.
  return new BasicTimePartitionDetail(this, detail.getRelativePath(), detail.getPartitionKey(),
                                      detail.getMetadata());
}
@Nullable
@Override
public TimePartitionDetail getPartitionByTime(long time) {
  // Resolve the key for this timestamp, then fetch the matching partition (may be absent).
  PartitionDetail found = getPartition(partitionKeyForTime(time));
  if (found == null) {
    return null;
  }
  // Adapt the plain PartitionDetail into a time-partition view over the same data.
  return new BasicTimePartitionDetail(this, found.getRelativePath(), found.getPartitionKey(),
                                      found.getMetadata());
}
public void deleteMatchingPartitionsByTime(long upperLimit) throws IOException {
  // Nothing to do for a degenerate bound: non-positive limits match nothing, and
  // Long.MAX_VALUE is treated as "no limit" by the original guard.
  if (upperLimit <= 0 || upperLimit >= Long.MAX_VALUE) {
    return;
  }
  // Select every partition whose snapshot field falls strictly below the limit, then drop them.
  PartitionFilter filter =
      PartitionFilter.builder().addRangeCondition(SNAPSHOT_FIELD, null, upperLimit).build();
  for (PartitionDetail partition : files.getPartitions(filter)) {
    files.dropPartition(partition.getPartitionKey());
  }
}
/**
 * Populates the ConsumerWorkingSet by fetching partitions from the given PartitionedFileSet.
 *
 * @param partitionedFileSet the PartitionedFileSet to fetch partitions from
 * @param configuration the ConsumerConfiguration which defines parameters for consuming
 */
public void populate(PartitionedFileSet partitionedFileSet, ConsumerConfiguration configuration) {
  // Only fetch as many partitions as still fit under the configured working-set size cap.
  int numToPopulate = configuration.getMaxWorkingSetSize() - partitions.size();
  Predicate<PartitionDetail> predicate = configuration.getPartitionPredicate();
  co.cask.cdap.api.dataset.lib.PartitionConsumerResult result =
      partitionedFileSet.consumePartitions(partitionConsumerState, numToPopulate, predicate);
  // Renamed from 'partitions' to avoid shadowing the working-set field read above via
  // partitions.size(); the original shadowing was confusing though not incorrect.
  List<PartitionDetail> fetchedPartitions = result.getPartitions();
  for (PartitionDetail partition : fetchedPartitions) {
    addPartition(partition.getPartitionKey());
  }
  // Persist the consumer cursor so the next populate() resumes where this one left off.
  partitionConsumerState = result.getPartitionConsumerState();
}
@Override
public void apply() throws Exception {
  // Consume the single available partition and verify it is the expected one.
  // (The original comment about "numRetries times plus one" described a different
  // test body; this one performs exactly one consume/abort cycle.)
  List<PartitionDetail> partitionDetails = partitionConsumer.consumePartitions(1).getPartitions();
  Assert.assertEquals(1, partitionDetails.size());
  Assert.assertEquals(partitionKey1, partitionDetails.get(0).getPartitionKey());
  // aborting the processing of the partition, to put it back in the working set
  partitionConsumer.onFinish(partitionDetails, false);
}
});
@Override
public void apply() throws Exception {
  // Replace the dataset's partition population: clear everything, then add a fresh batch.
  // drop all existing partitions (2 of which are not consumed)
  for (PartitionDetail partitionDetail : dataset.getPartitions(PartitionFilter.ALWAYS_MATCH)) {
    dataset.dropPartition(partitionDetail.getPartitionKey());
  }
  // add 5 new ones
  for (PartitionKey partitionKey : partitionKeys2) {
    dataset.getPartitionOutput(partitionKey).addPartition();
  }
}
});
@Override
public void apply() throws Exception {
  // consuming and aborting the partition numRetries times plus one (for the first attempt) makes it get removed
  // from the working set
  for (int i = 0; i < numRetries + 1; i++) {
    // Each iteration consumes the same single partition and then aborts it.
    List<PartitionDetail> partitionDetails = partitionConsumer.consumePartitions(1).getPartitions();
    Assert.assertEquals(1, partitionDetails.size());
    Assert.assertEquals(partitionKey, partitionDetails.get(0).getPartitionKey());
    // aborting the processing of the partition
    partitionConsumer.onFinish(partitionDetails, false);
  }
  // after the 2nd abort, the partition is discarded entirely, and so no partitions are available for consuming
  PartitionConsumerResult result = partitionConsumer.consumePartitions(1);
  Assert.assertEquals(0, result.getPartitions().size());
  // The discarded partition is reported through the failed-partitions list instead.
  Assert.assertEquals(1, result.getFailedPartitions().size());
  Assert.assertEquals(partitionKey, result.getFailedPartitions().get(0).getPartitionKey());
}
});
@Override
public void apply() throws Exception {
  // first call to consume will drop the partition from the working set, and return nothing, since it was
  // the only partition in the working set
  PartitionConsumerResult result = partitionConsumer.consumePartitions(1);
  Assert.assertEquals(0, result.getPartitions().size());
  Assert.assertEquals(0, result.getFailedPartitions().size());
  // following calls to consumePartitions will repopulate the working set and return additional partition(s)
  result = partitionConsumer.consumePartitions(1);
  Assert.assertEquals(1, result.getPartitions().size());
  Assert.assertEquals(partitionKey2, result.getPartitions().get(0).getPartitionKey());
}
});
// Remove both previously-created partitions (the standard one and the custom one)
// from the time-partitioned file set.
tpfs.dropPartition(partition.getPartitionKey());
tpfs.dropPartition(customPartition.getPartitionKey());
// Consume and verify the first available partition, then commit the transaction.
List<PartitionDetail> partitions = partitionConsumer.consumePartitions();
Assert.assertEquals(1, partitions.size());
Assert.assertEquals(partitionKey1, partitions.get(0).getPartitionKey());
txContext2.finish();
// NOTE(review): the expression below is an orphaned argument with an unbalanced closing
// paren, and it reads partitions.get(1) even though the size was just asserted to be 1.
// This looks like a garbled merge/paste — confirm against the original test source.
ImmutableSet.of(partitions.get(0).getPartitionKey(), partitions.get(1).getPartitionKey()));
txContext2.finish();
// Drop the partition from the partitioned file set by its key.
pfs.dropPartition(partition.getPartitionKey());
// First consume call returns exactly the first partition; commit afterwards.
List<? extends PartitionDetail> partitionIterator = partitionConsumer.consumePartitions().getPartitions();
Assert.assertEquals(1, partitionIterator.size());
Assert.assertEquals(partitionKey1, partitionIterator.get(0).getPartitionKey());
txContext2.finish();
// Second consume call advances to the next partition; commit again.
partitionIterator = partitionConsumer.consumePartitions().getPartitions();
Assert.assertEquals(1, partitionIterator.size());
Assert.assertEquals(partitionKey2, partitionIterator.get(0).getPartitionKey());
txContext2.finish();