@Override
public CompactionSegmentIterator reset(
    Map<String, DataSourceCompactionConfig> compactionConfigs,
    Map<String, VersionedIntervalTimeline<String, DataSegment>> dataSources,
    Map<String, List<Interval>> skipIntervals
)
{
  return new NewestSegmentFirstIterator(compactionConfigs, dataSources, skipIntervals);
}
}
public long remainingSegmentSizeBytesForCompaction(String dataSource)
{
  return segmentCompactor.getRemainingSegmentSizeBytes(dataSource);
}
@Benchmark
public void measureNewestSegmentFirstPolicy(Blackhole blackhole)
{
  final CompactionSegmentIterator iterator = policy.reset(compactionConfigs, dataSources, Collections.emptyMap());
  for (int i = 0; i < numCompactionTaskSlots && iterator.hasNext(); i++) {
    final List<DataSegment> segments = iterator.next();
    blackhole.consume(segments);
  }
}
}
@Test
public void testLargeOffsetAndSmallSegmentInterval()
{
  final Period segmentPeriod = new Period("PT1H");
  final CompactionSegmentIterator iterator = policy.reset(
      ImmutableMap.of(DATA_SOURCE, createCompactionConfig(10000, 100, new Period("P2D"))),
      ImmutableMap.of(
          DATA_SOURCE,
          createTimeline(
              new SegmentGenerateSpec(Intervals.of("2017-11-16T20:00:00/2017-11-17T04:00:00"), segmentPeriod),
              new SegmentGenerateSpec(Intervals.of("2017-11-14T00:00:00/2017-11-16T07:00:00"), segmentPeriod)
          )
      ),
      Collections.emptyMap()
  );

  assertCompactSegmentIntervals(
      iterator,
      segmentPeriod,
      Intervals.of("2017-11-14T00:00:00/2017-11-14T01:00:00"),
      Intervals.of("2017-11-15T03:00:00/2017-11-15T04:00:00"),
      true
  );
}
@Test
public void testIfFirstSegmentIsInSkipOffset()
{
  final VersionedIntervalTimeline<String, DataSegment> timeline = createTimeline(
      new SegmentGenerateSpec(
          Intervals.of("2017-12-02T14:00:00/2017-12-03T00:00:00"),
          new Period("PT5H"),
          40000,
          1
      )
  );

  final CompactionSegmentIterator iterator = policy.reset(
      ImmutableMap.of(DATA_SOURCE, createCompactionConfig(40000, 100, new Period("P1D"))),
      ImmutableMap.of(DATA_SOURCE, timeline),
      Collections.emptyMap()
  );

  Assert.assertFalse(iterator.hasNext());
}
final DruidCoordinatorSegmentCompactor compactor = new DruidCoordinatorSegmentCompactor(indexingServiceClient);

assertCompactSegments(compactor, keepSegmentGranularity /* , remaining expected-segment arguments elided */);
assertCompactSegments(compactor, keepSegmentGranularity /* , remaining expected-segment arguments elided */);
assertCompactSegments(compactor, keepSegmentGranularity /* , remaining expected-segment arguments elided */);
assertLastSegmentNotCompacted(compactor, keepSegmentGranularity);
/**
 * Does this set of segments fully cover the union of all segment intervals?
 *
 * @return true if this set is complete
 */
public boolean isComplete()
{
  return timelineObjects.isEmpty() || getMergedTimelineInterval().equals(getMergedUnderlyingInterval());
}
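// A hedged toy illustration of the completeness check (the intervals below are
// made up, not from the source): the timeline holders span two days, but the
// underlying segments cover only the first day, so the two merged intervals
// differ and such a set would be incomplete.
final Interval mergedTimelineInterval = Intervals.of("2017-01-01/2017-01-03");   // span of the holders
final Interval mergedUnderlyingInterval = Intervals.of("2017-01-01/2017-01-02"); // union of the segments
Assert.assertFalse(mergedTimelineInterval.equals(mergedUnderlyingInterval));     // incomplete set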
/**
 * Finds the next segments to compact for the given dataSource and adds them to the queue.
 * {@link #timelineIterators} is updated accordingly: the found segments are removed from
 * the timeline of the given dataSource.
 */
private void updateQueue(String dataSourceName, DataSourceCompactionConfig config)
{
  final CompactibleTimelineObjectHolderCursor compactibleTimelineObjectHolderCursor =
      timelineIterators.get(dataSourceName);

  if (compactibleTimelineObjectHolderCursor == null) {
    log.warn("Cannot find timeline for dataSource[%s]. Skipping this dataSource", dataSourceName);
    return;
  }

  final SegmentsToCompact segmentsToCompact = findSegmentsToCompact(
      compactibleTimelineObjectHolderCursor,
      config
  );

  if (segmentsToCompact.getNumSegments() > 1) {
    queue.add(new QueueEntry(segmentsToCompact.segments));
  }
}
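// For context, a minimal sketch (not the verbatim implementation) of how an
// iterator's next() might drain and refill the queue via updateQueue(): pop the
// best entry, then immediately search the same dataSource's timeline again so
// the queue keeps holding the next candidate per dataSource. QueueEntry,
// queue, and compactionConfigs are assumed fields of the surrounding class.
@Override
public List<DataSegment> next()
{
  if (!hasNext()) {
    throw new NoSuchElementException();
  }
  final QueueEntry entry = queue.poll();
  final List<DataSegment> resultSegments = entry.segments;
  final String dataSource = resultSegments.get(0).getDataSource();
  updateQueue(dataSource, compactionConfigs.get(dataSource));
  return resultSegments;
}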
private CoordinatorStats runCompactor(DruidCoordinatorSegmentCompactor compactor, boolean keepSegmentGranularity)
{
  DruidCoordinatorRuntimeParams params = DruidCoordinatorRuntimeParams
      .newBuilder()
      .withDataSources(dataSources)
      .withCompactionConfig(CoordinatorCompactionConfig.from(createCompactionConfigs(keepSegmentGranularity)))
      .build();
  return compactor.run(params).getCoordinatorStats();
}
private void emitTieredStats(
    final ServiceEmitter emitter,
    final String metricName,
    final CoordinatorStats stats,
    final String statName
)
{
  stats.forEachTieredStat(
      statName,
      (final String tier, final long count) -> emitTieredStat(emitter, metricName, tier, count)
  );
}
private CoordinatorStats makeStats(int numCompactionTasks, CompactionSegmentIterator iterator)
{
  final CoordinatorStats stats = new CoordinatorStats();
  stats.addToGlobalStat(COMPACT_TASK_COUNT, numCompactionTasks);

  // Remember the per-dataSource byte counts, then report each as a stat. Reuse
  // the stored map rather than calling iterator.remainingSegmentSizeBytes() twice.
  remainingSegmentSizeBytes = iterator.remainingSegmentSizeBytes();
  remainingSegmentSizeBytes.object2LongEntrySet().fastForEach(
      entry -> {
        final String dataSource = entry.getKey();
        // This value is a byte count, not a number of segments.
        final long segmentSizeWaitCompact = entry.getLongValue();
        stats.addToDataSourceStat(SEGMENT_SIZE_WAIT_COMPACT, dataSource, segmentSizeWaitCompact);
      }
  );
  return stats;
}
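// Hypothetical wiring (a sketch, not the source's exact run loop) showing where
// makeStats() fits: submit compaction tasks from the policy's iterator until the
// available slots run out, then record the task count plus the bytes still
// awaiting compaction. numAvailableCompactionTaskSlots and the elided
// compactSegments arguments are assumptions for illustration.
final CompactionSegmentIterator iterator = policy.reset(compactionConfigs, dataSources, Collections.emptyMap());
int numSubmittedTasks = 0;
while (iterator.hasNext() && numSubmittedTasks < numAvailableCompactionTaskSlots) {
  final List<DataSegment> segmentsToCompact = iterator.next();
  if (!segmentsToCompact.isEmpty()) {
    indexingServiceClient.compactSegments(segmentsToCompact /* , tuning and context arguments elided */);
    numSubmittedTasks++;
  }
}
return makeStats(numSubmittedTasks, iterator);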
@Override
public DruidCoordinatorRuntimeParams run(DruidCoordinatorRuntimeParams params)
{
  final CoordinatorStats stats = new CoordinatorStats();
  params.getDruidCluster().getHistoricals().forEach(
      (String tier, NavigableSet<ServerHolder> servers) -> balanceTier(params, tier, servers, stats)
  );
  return params.buildFromExisting().withCoordinatorStats(stats).build();
}
@Test
public void testSkipUnknownDataSource()
{
  final String unknownDataSource = "unknown";
  final Period segmentPeriod = new Period("PT1H");
  final CompactionSegmentIterator iterator = policy.reset(
      ImmutableMap.of(
          unknownDataSource,
          createCompactionConfig(10000, 100, new Period("P2D")),
          DATA_SOURCE,
          createCompactionConfig(10000, 100, new Period("P2D"))
      ),
      ImmutableMap.of(
          DATA_SOURCE,
          createTimeline(
              new SegmentGenerateSpec(Intervals.of("2017-11-16T20:00:00/2017-11-17T04:00:00"), segmentPeriod),
              new SegmentGenerateSpec(Intervals.of("2017-11-14T00:00:00/2017-11-16T07:00:00"), segmentPeriod)
          )
      ),
      Collections.emptyMap()
  );

  assertCompactSegmentIntervals(
      iterator,
      segmentPeriod,
      Intervals.of("2017-11-14T00:00:00/2017-11-14T01:00:00"),
      Intervals.of("2017-11-15T03:00:00/2017-11-15T04:00:00"),
      true
  );
}
@Test
public void testIfFirstSegmentOverlapsSkipOffset()
{
  final VersionedIntervalTimeline<String, DataSegment> timeline = createTimeline(
      new SegmentGenerateSpec(
          Intervals.of("2017-12-01T23:00:00/2017-12-03T00:00:00"),
          new Period("PT5H"),
          40000,
          1
      )
  );

  final CompactionSegmentIterator iterator = policy.reset(
      ImmutableMap.of(DATA_SOURCE, createCompactionConfig(40000, 100, new Period("P1D"))),
      ImmutableMap.of(DATA_SOURCE, timeline),
      Collections.emptyMap()
  );

  Assert.assertFalse(iterator.hasNext());
}
@Test
public void testSmallOffsetAndLargeSegmentInterval()
{
  final Period segmentPeriod = new Period("PT1H");
  final CompactionSegmentIterator iterator = policy.reset(
      ImmutableMap.of(DATA_SOURCE, createCompactionConfig(10000, 100, new Period("PT1M"))),
      ImmutableMap.of(
          DATA_SOURCE,
          createTimeline(
              new SegmentGenerateSpec(Intervals.of("2017-11-16T20:00:00/2017-11-17T04:00:00"), segmentPeriod),
              new SegmentGenerateSpec(Intervals.of("2017-11-14T00:00:00/2017-11-16T07:00:00"), segmentPeriod)
          )
      ),
      Collections.emptyMap()
  );

  assertCompactSegmentIntervals(
      iterator,
      segmentPeriod,
      Intervals.of("2017-11-16T20:00:00/2017-11-16T21:00:00"),
      Intervals.of("2017-11-17T02:00:00/2017-11-17T03:00:00"),
      false
  );

  assertCompactSegmentIntervals(
      iterator,
      segmentPeriod,
      Intervals.of("2017-11-14T00:00:00/2017-11-14T01:00:00"),
      Intervals.of("2017-11-16T06:00:00/2017-11-16T07:00:00"),
      true
  );
}
@Test
public void testIgnoreSingleSegmentToCompact()
{
  final CompactionSegmentIterator iterator = policy.reset(
      ImmutableMap.of(DATA_SOURCE, createCompactionConfig(800000, 100, new Period("P1D"))),
      ImmutableMap.of(
          DATA_SOURCE,
          createTimeline(
              new SegmentGenerateSpec(
                  Intervals.of("2017-12-02T00:00:00/2017-12-03T00:00:00"),
                  new Period("P1D"),
                  200,
                  1
              ),
              new SegmentGenerateSpec(
                  Intervals.of("2017-12-01T00:00:00/2017-12-02T00:00:00"),
                  new Period("P1D"),
                  200,
                  1
              )
          )
      ),
      Collections.emptyMap()
  );

  Assert.assertFalse(iterator.hasNext());
}
final CompactionSegmentIterator iterator = policy.reset(
    ImmutableMap.of(DATA_SOURCE, createCompactionConfig(10000, 100, new Period("PT1H1M"))),
    ImmutableMap.of(
        DATA_SOURCE,
        createTimeline(
            new SegmentGenerateSpec(Intervals.of("2017-11-16T20:00:00/2017-11-17T04:00:00"), segmentPeriod),
            new SegmentGenerateSpec(Intervals.of("2017-11-14T00:00:00/2017-11-15T07:00:00"), segmentPeriod)
        )
    ),
    Collections.emptyMap()
);

assertCompactSegmentIntervals(iterator, segmentPeriod /* , expected-interval arguments elided */);
assertCompactSegmentIntervals(iterator, segmentPeriod /* , expected-interval arguments elided */);
final CompactionSegmentIterator iterator = policy.reset(
    ImmutableMap.of(DATA_SOURCE, createCompactionConfig(10000, 5, new Period("PT1H1M"))),
    ImmutableMap.of(
        DATA_SOURCE,
        createTimeline(
            new SegmentGenerateSpec(Intervals.of("2017-11-16T20:00:00/2017-11-17T04:00:00"), segmentPeriod),
            new SegmentGenerateSpec(Intervals.of("2017-11-14T00:00:00/2017-11-15T07:00:00"), segmentPeriod)
        )
    ),
    Collections.emptyMap()
);

assertCompactSegmentIntervals(iterator, segmentPeriod /* , expected-interval arguments elided */);
assertCompactSegmentIntervals(iterator, segmentPeriod /* , expected-interval arguments elided */);
final CompactionSegmentIterator iterator = policy.reset(
    ImmutableMap.of(DATA_SOURCE, createCompactionConfig(10000, 100, new Period("P1D"))),
    ImmutableMap.of(
        DATA_SOURCE,
        createTimeline(
            new SegmentGenerateSpec(Intervals.of("2017-11-16T20:00:00/2017-11-17T04:00:00"), segmentPeriod),
            new SegmentGenerateSpec(Intervals.of("2017-11-14T00:00:00/2017-11-16T07:00:00"), segmentPeriod)
        )
    ),
    Collections.emptyMap()
);

assertCompactSegmentIntervals(iterator, segmentPeriod /* , expected-interval arguments elided */);
assertCompactSegmentIntervals(iterator, segmentPeriod /* , expected-interval arguments elided */);
final CompactionSegmentIterator iterator = policy.reset(
    ImmutableMap.of(DATA_SOURCE, createCompactionConfig(10000, 100, new Period("PT1H"))),
    ImmutableMap.of(
        DATA_SOURCE,
        createTimeline(
            new SegmentGenerateSpec(Intervals.of("2017-11-16T00:00:00/2017-11-17T00:00:00"), segmentPeriod)
        )
    ),
    Collections.emptyMap()
);

assertCompactSegmentIntervals(iterator, segmentPeriod /* , expected-interval arguments elided */);
assertCompactSegmentIntervals(iterator, segmentPeriod /* , expected-interval arguments elided */);
assertCompactSegmentIntervals(iterator, segmentPeriod /* , expected-interval arguments elided */);