// Weighs a WorkUnit by the numeric value of the property named by this.field.
// NOTE(review): assumes the property exists and parses as a long — behavior when
// absent depends on getPropAsLong's contract; confirm upstream callers set it.
@Override public long weight(WorkUnit workUnit) {
  return workUnit.getPropAsLong(this.field);
}
// Closes the enclosing (anonymous/inner) class whose header is outside this view.
}
/**
 * Returns the low watermark recorded on this {@link WorkUnit} under
 * {@link ConfigurationKeys#WORK_UNIT_LOW_WATER_MARK_KEY}.
 *
 * @return the low watermark
 * @deprecated use the {@link #getLowWatermark()} method.
 */
@Deprecated
public long getLowWaterMark() {
  final long lowWaterMark = getPropAsLong(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY);
  return lowWaterMark;
}
/**
 * Returns the high watermark recorded on this {@link WorkUnit} under
 * {@link ConfigurationKeys#WORK_UNIT_HIGH_WATER_MARK_KEY}.
 *
 * @return the high watermark
 * @deprecated use the {@link #getExpectedHighWatermark()} method.
 */
@Deprecated
public long getHighWaterMark() {
  final long highWaterMark = getPropAsLong(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY);
  return highWaterMark;
}
/**
 * Estimates the byte size of the given {@link WorkUnit} as the partition's average
 * record size multiplied by the number of records between its low and high watermarks.
 */
@Override
public double calcEstimatedSize(WorkUnit workUnit) {
  final long highWatermark = workUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY);
  final long lowWatermark = workUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY);
  final long recordCount = highWatermark - lowWatermark;
  final long averageRecordSize = this.getEstAvgSizeForPartition(KafkaUtils.getPartition(workUnit));
  // Widen to double before multiplying, matching the original overflow-avoiding cast.
  return (double) averageRecordSize * recordCount;
}
/**
 * Estimates the processing cost of the given {@link WorkUnit} as the topic's average
 * per-record milliseconds multiplied by the number of records between its watermarks.
 */
@Override
public double calcEstimatedSize(WorkUnit workUnit) {
  final long highWatermark = workUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY);
  final long lowWatermark = workUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY);
  final long recordCount = highWatermark - lowWatermark;
  final double averageMillisPerRecord = this.getEstAvgMillisForTopic(KafkaUtils.getTopicName(workUnit));
  return averageMillisPerRecord * recordCount;
}
// Upper bound for a single split, read from the work unit; falls back to
// DEFAULT_MAX_SPLIT_SIZE when MAX_SPLIT_SIZE_KEY is not set.
long maxSplitSize = workUnit.getPropAsLong(MAX_SPLIT_SIZE_KEY, DEFAULT_MAX_SPLIT_SIZE);
/**
 * Verifies the first {@code expectedSize} multi-work-units: for each one, extracts a
 * record from its first inner work unit and asserts that both the record's partition
 * column and the work unit's date-partition property equal the expected timestamp.
 *
 * @param workunits list whose elements are expected to be {@link MultiWorkUnit}s
 * @param expectedSize number of leading entries to verify
 */
private void verifyWorkUnits(List<WorkUnit> workunits, int expectedSize)
    throws DataRecordException, IOException {
  for (int index = 0; index < expectedSize; index++) {
    MultiWorkUnit multiWorkUnit = (MultiWorkUnit) workunits.get(index);
    WorkUnit innerWorkUnit = multiWorkUnit.getWorkUnits().get(0);
    WorkUnitState workUnitState = new WorkUnitState(workunits.get(index), new State());
    workUnitState.setProp(ConfigurationKeys.SOURCE_FILEBASED_FS_URI, ConfigurationKeys.LOCAL_FS_URI);
    workUnitState.setProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL,
        innerWorkUnit.getProp(ConfigurationKeys.SOURCE_FILEBASED_FILES_TO_PULL));
    try (DatePartitionedAvroFileExtractor extractor = new DatePartitionedAvroFileExtractor(workUnitState)) {
      GenericRecord record = extractor.readRecord(null);
      Assert.assertEquals(recordTimestamps[index], record.get(PARTITION_COLUMN_NAME));
      Assert.assertEquals(recordTimestamps[index],
          innerWorkUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_DATE_PARTITION_KEY));
    }
  }
}
// Weighs a WorkUnit by the numeric value of the property named by this.field.
// NOTE(review): assumes the property exists and parses as a long — behavior when
// absent depends on getPropAsLong's contract; confirm upstream callers set it.
@Override public long weight(WorkUnit workUnit) {
  return workUnit.getPropAsLong(this.field);
}
// Closes the enclosing (anonymous/inner) class whose header is outside this view.
}
/**
 * Returns the high watermark recorded on this {@link WorkUnit} under
 * {@link ConfigurationKeys#WORK_UNIT_HIGH_WATER_MARK_KEY}.
 *
 * @return the high watermark
 * @deprecated use the {@link #getExpectedHighWatermark()} method.
 */
@Deprecated
public long getHighWaterMark() {
  final long highWaterMark = getPropAsLong(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY);
  return highWaterMark;
}
/**
 * Returns the low watermark recorded on this {@link WorkUnit} under
 * {@link ConfigurationKeys#WORK_UNIT_LOW_WATER_MARK_KEY}.
 *
 * @return the low watermark
 * @deprecated use the {@link #getLowWatermark()} method.
 */
@Deprecated
public long getLowWaterMark() {
  final long lowWaterMark = getPropAsLong(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY);
  return lowWaterMark;
}
/**
 * Estimates the byte size of the given {@link WorkUnit} as the partition's average
 * record size multiplied by the number of records between its low and high watermarks.
 */
@Override
public double calcEstimatedSize(WorkUnit workUnit) {
  final long highWatermark = workUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY);
  final long lowWatermark = workUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY);
  final long recordCount = highWatermark - lowWatermark;
  final long averageRecordSize = this.getEstAvgSizeForPartition(KafkaUtils.getPartition(workUnit));
  // Widen to double before multiplying, matching the original overflow-avoiding cast.
  return (double) averageRecordSize * recordCount;
}
/**
 * Estimates the processing cost of the given {@link WorkUnit} as the topic's average
 * per-record milliseconds multiplied by the number of records between its watermarks.
 */
@Override
public double calcEstimatedSize(WorkUnit workUnit) {
  final long highWatermark = workUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_HIGH_WATER_MARK_KEY);
  final long lowWatermark = workUnit.getPropAsLong(ConfigurationKeys.WORK_UNIT_LOW_WATER_MARK_KEY);
  final long recordCount = highWatermark - lowWatermark;
  final double averageMillisPerRecord = this.getEstAvgMillisForTopic(KafkaUtils.getTopicName(workUnit));
  return averageMillisPerRecord * recordCount;
}
// Upper bound for a single split, read from the work unit; falls back to
// DEFAULT_MAX_SPLIT_SIZE when MAX_SPLIT_SIZE_KEY is not set.
long maxSplitSize = workUnit.getPropAsLong(MAX_SPLIT_SIZE_KEY, DEFAULT_MAX_SPLIT_SIZE);