public DeterminePartitionsDimSelectionMapperHelper(HadoopDruidIndexerConfig config, String partitionDimension)
{
  this.config = config;
  this.partitionDimension = partitionDimension;

  // Index each configured bucket interval by its start-millis so mappers can
  // emit a compact per-bucket ordinal instead of a full interval.
  final ImmutableMap.Builder<Long, Integer> builder = ImmutableMap.builder();
  int position = 0;
  for (final Interval bucket : config.getGranularitySpec().bucketIntervals().get()) {
    builder.put(bucket.getStartMillis(), position++);
  }
  this.intervalIndexes = builder.build();
}
public Optional<Set<Interval>> getSegmentGranularIntervals()
{
  // bucketIntervals() is absent when intervals are left to be discovered at runtime;
  // orNull() + fromNullable lets us widen SortedSet to Set in the Optional's type.
  final SortedSet<Interval> buckets = schema.getDataSchema()
                                            .getGranularitySpec()
                                            .bucketIntervals()
                                            .orNull();
  return Optional.fromNullable((Set<Interval>) buckets);
}
public Optional<List<Interval>> getIntervals()
{
  // Absent when the granularity spec has no explicit intervals configured.
  final Optional<SortedSet<Interval>> buckets =
      schema.getDataSchema().getGranularitySpec().bucketIntervals();
  if (!buckets.isPresent()) {
    return Optional.absent();
  }
  // Condense adjacent/overlapping intervals into a minimal list.
  return Optional.of((List<Interval>) JodaUtils.condenseIntervals(buckets.get()));
}
@Override
public boolean isReady(TaskActionClient taskActionClient) throws Exception
{
  final Optional<SortedSet<Interval>> bucketIntervals = ingestionSchema.getDataSchema()
                                                                       .getGranularitySpec()
                                                                       .bucketIntervals();
  // No explicit intervals means the task is immediately ready; otherwise
  // readiness is delegated to the interval-aware overload.
  return !bucketIntervals.isPresent() || isReady(taskActionClient, bucketIntervals.get());
}
@Override
public boolean isReady(TaskActionClient taskActionClient)
{
  final Optional<SortedSet<Interval>> bucketIntervals = ingestionSchema.getDataSchema()
                                                                       .getGranularitySpec()
                                                                       .bucketIntervals();
  if (bucketIntervals.isPresent()) {
    // Explicit intervals: ready only once the locks covering them are held.
    return checkLockAcquired(taskActionClient, bucketIntervals.get());
  } else {
    // No explicit intervals: locks are acquired lazily during ingestion.
    return true;
  }
}
@Override
public boolean isReady(TaskActionClient taskActionClient) throws Exception
{
  final Optional<SortedSet<Interval>> buckets = ingestionSchema.getDataSchema()
                                                               .getGranularitySpec()
                                                               .bucketIntervals();
  if (!buckets.isPresent()) {
    // No explicit intervals configured: nothing to lock up front.
    return true;
  }
  return isReady(taskActionClient, buckets.get());
}
// Requires explicit bucket intervals; get() throws if the spec leaves intervals
// to be auto-discovered — NOTE(review): presumably the caller checks isPresent()
// first; confirm against the enclosing method.
final SortedSet<Interval> intervals = granularitySpec.bucketIntervals().get();
@Override
public boolean isReady(TaskActionClient taskActionClient) throws Exception
{
  final Optional<SortedSet<Interval>> intervals =
      spec.getDataSchema().getGranularitySpec().bucketIntervals();
  if (!intervals.isPresent()) {
    // No explicit intervals: no up-front lock is needed, so the task is ready.
    return true;
  }
  // Collapse all configured intervals into a single umbrella interval and try
  // to take one exclusive lock covering it; a null result means the lock is
  // currently held elsewhere.
  final Interval umbrella = JodaUtils.umbrellaInterval(JodaUtils.condenseIntervals(intervals.get()));
  return taskActionClient.submit(new LockTryAcquireAction(TaskLockType.EXCLUSIVE, umbrella)) != null;
}
// Fail fast with a named-argument NPE when the spec has no explicit bucket
// intervals, rather than letting a later get() fail opaquely.
final GranularitySpec granularitySpec = getIngestionSchema().getDataSchema().getGranularitySpec(); final SortedSet<Interval> bucketIntervals = Preconditions.checkNotNull( granularitySpec.bucketIntervals().orNull(), "bucketIntervals" );
@Test
public void testJson()
{
  // Intervals are deliberately out of order and overlapping-adjacent to verify
  // that bucketIntervals() is identical after a JSON round trip.
  final GranularitySpec spec = new ArbitraryGranularitySpec(
      Granularities.NONE,
      Lists.newArrayList(
          Intervals.of("2012-01-08T00Z/2012-01-11T00Z"),
          Intervals.of("2012-02-01T00Z/2012-03-01T00Z"),
          Intervals.of("2012-01-07T00Z/2012-01-08T00Z"),
          Intervals.of("2012-01-03T00Z/2012-01-04T00Z"),
          Intervals.of("2012-01-01T00Z/2012-01-03T00Z")
      )
  );
  try {
    final GranularitySpec rtSpec = jsonMapper.readValue(jsonMapper.writeValueAsString(spec), GranularitySpec.class);
    Assert.assertEquals("Round-trip", spec.bucketIntervals(), rtSpec.bucketIntervals());
  }
  catch (Exception e) {
    // Guava's Throwables.propagate is deprecated; wrap the checked exception
    // directly, which is exactly what propagate did for non-runtime throwables.
    throw new RuntimeException(e);
  }
}
}
// Tail of a larger call chain: true only when explicit bucket intervals
// were configured in the granularity spec.
.bucketIntervals() .isPresent();
// Intervals must be discovered from the input data when none were configured
// in the granularity spec.
final boolean determineIntervals = !granularitySpec.bucketIntervals().isPresent();
// determineIntervals: true when the spec has no explicit intervals. The umbrella
// interval below spans the condensed bucket intervals of the indexer schema —
// NOTE(review): get() here assumes those intervals are present by this point;
// confirm against the surrounding control flow (expression is truncated in this view).
boolean determineIntervals = !spec.getDataSchema().getGranularitySpec().bucketIntervals().isPresent(); Interval interval = JodaUtils.umbrellaInterval( JodaUtils.condenseIntervals( indexerSchema.getDataSchema().getGranularitySpec().bucketIntervals().get()
@Test
public void testJson()
{
  // Out-of-order input intervals verify that both the bucket intervals and the
  // segment granularity survive a JSON round trip unchanged.
  final GranularitySpec spec = new UniformGranularitySpec(
      Granularities.DAY,
      null,
      Lists.newArrayList(
          Intervals.of("2012-01-08T00Z/2012-01-11T00Z"),
          Intervals.of("2012-01-07T00Z/2012-01-08T00Z"),
          Intervals.of("2012-01-03T00Z/2012-01-04T00Z"),
          Intervals.of("2012-01-01T00Z/2012-01-03T00Z")
      )
  );
  try {
    final GranularitySpec rtSpec = jsonMapper.readValue(jsonMapper.writeValueAsString(spec), GranularitySpec.class);
    Assert.assertEquals(
        "Round-trip bucketIntervals",
        spec.bucketIntervals(),
        rtSpec.bucketIntervals()
    );
    Assert.assertEquals(
        "Round-trip granularity",
        spec.getSegmentGranularity(),
        rtSpec.getSegmentGranularity()
    );
  }
  catch (Exception e) {
    // Guava's Throwables.propagate is deprecated; wrap the checked exception
    // directly, which is exactly what propagate did for non-runtime throwables.
    throw new RuntimeException(e);
  }
}
// Accept the row when either (a) no explicit intervals were configured, or
// (b) the row's timestamp falls inside one of the configured bucket intervals.
// (Body of the if is outside this view.)
if (!granularitySpec.bucketIntervals().isPresent() || granularitySpec.bucketInterval(DateTimes.utc(inputRow.getTimestampFromEpoch())) .isPresent()) {
// maxTotalRows may legitimately be null (no limit). explicitIntervals records
// whether the spec pinned its intervals up front, which steers segment allocation.
@Nullable final Long maxTotalRows = IndexTask.getValidMaxTotalRows(tuningConfig); final long pushTimeout = tuningConfig.getPushTimeout(); final boolean explicitIntervals = granularitySpec.bucketIntervals().isPresent(); final SegmentAllocator segmentAllocator = createSegmentAllocator(toolbox, taskClient, ingestionSchema);
// Presence is asserted first, so the subsequent get() cannot throw.
Assert.assertTrue(spec.bucketIntervals().isPresent()); final Optional<SortedSet<Interval>> sortedSetOptional = spec.bucketIntervals(); final SortedSet<Interval> intervals = sortedSetOptional.get(); ArrayList<Long> actualIntervals = new ArrayList<>();
// Append mode or auto-discovered intervals take the branch below
// (branch body is outside this view).
final boolean explicitIntervals = dataSchema.getGranularitySpec().bucketIntervals().isPresent(); final ParallelIndexIOConfig ioConfig = ingestionSchema.getIOConfig(); if (ioConfig.isAppendToExisting() || !explicitIntervals) {
// Tail of an assertEquals: expected interval list vs. the spec's actual
// bucket intervals (the head of the assertion is outside this view).
Intervals.of("2012-01-10T00Z/P1D") ), Lists.newArrayList(spec.bucketIntervals().get()) );
// Tail of an assertEquals: expected interval list vs. the spec's actual
// bucket intervals (the head of the assertion is outside this view).
Intervals.of("2012-02-01T00Z/2012-03-01T00Z") ), Lists.newArrayList(spec.bucketIntervals().get()) );