// Extracts the time interval covered by a segment descriptor.
// NOTE(review): fragment — the enclosing anonymous Function declaration starts before
// this chunk; the trailing brace closes it.
@Override
public Interval apply(SegmentDescriptor input)
{
  return input.getInterval();
}
}
/**
 * Returns the interval list for this spec: exactly the one interval covered by the
 * wrapped segment descriptor.
 */
@Override
public List<Interval> getIntervals()
{
  final Interval segmentInterval = descriptor.getInterval();
  return Collections.singletonList(segmentInterval);
}
/**
 * Convenience constructor: copies the segment coordinates (interval, version,
 * partition number) out of an existing {@link SegmentDescriptor}.
 *
 * @param descriptor segment whose coordinates are copied
 * @param size       segment size — presumably bytes; TODO confirm against the primary constructor
 * @param candidates servers that can serve this segment
 */
public LocatedSegmentDescriptor(SegmentDescriptor descriptor, long size, List<DruidServerMetadata> candidates)
{
  this(descriptor.getInterval(), descriptor.getVersion(), descriptor.getPartitionNumber(), size, candidates);
}
/**
 * Builds the cache key under which one segment's cached results for a query are stored.
 * The key is namespaced by {@code segmentId}; its byte payload is packed as
 * [startMillis(8)][endMillis(8)][version UTF-8][partitionNumber(4)][queryCacheKey].
 */
public static Cache.NamedKey computeSegmentCacheKey(
    String segmentId,
    SegmentDescriptor descriptor,
    byte[] queryCacheKey
)
{
  final Interval interval = descriptor.getInterval();
  final byte[] version = StringUtils.toUtf8(descriptor.getVersion());
  // 8 bytes each for the interval endpoints, 4 for the partition number.
  final ByteBuffer keyBytes = ByteBuffer.allocate(16 + version.length + 4 + queryCacheKey.length);
  keyBytes.putLong(interval.getStartMillis());
  keyBytes.putLong(interval.getEndMillis());
  keyBytes.put(version);
  keyBytes.putInt(descriptor.getPartitionNumber());
  keyBytes.put(queryCacheKey);
  return new Cache.NamedKey(segmentId, keyBytes.array());
}
/**
 * Looks up each remaining segment in the results cache and returns the cached
 * (interval, result-bytes) pairs. Segments that hit the cache are REMOVED from the
 * mutable {@code segments} set so they are not queried again; on a miss, when cache
 * population is enabled, the segment is registered for population after the query runs.
 *
 * @param queryCacheKey query-level cache key fragment; {@code null} means caching is
 *                      disabled for this query, so nothing is pruned
 * @param segments      mutable set of (server, segment) pairs still to be queried;
 *                      mutated in place on cache hits
 * @return cached results keyed by segment interval (empty when caching is disabled)
 */
private List<Pair<Interval, byte[]>> pruneSegmentsWithCachedResults(
    final byte[] queryCacheKey,
    final Set<ServerToSegment> segments
)
{
  if (queryCacheKey == null) {
    return Collections.emptyList();
  }
  final List<Pair<Interval, byte[]>> alreadyCachedResults = new ArrayList<>();
  Map<ServerToSegment, Cache.NamedKey> perSegmentCacheKeys = computePerSegmentCacheKeys(segments, queryCacheKey);
  // Pull cached segments from cache and remove from set of segments to query
  final Map<Cache.NamedKey, byte[]> cachedValues = computeCachedValues(perSegmentCacheKeys);
  perSegmentCacheKeys.forEach((segment, segmentCacheKey) -> {
    final Interval segmentQueryInterval = segment.getSegmentDescriptor().getInterval();
    final byte[] cachedValue = cachedValues.get(segmentCacheKey);
    if (cachedValue != null) {
      // remove cached segment from set of segments to query
      // (safe: we iterate the key map, not the set being mutated)
      segments.remove(segment);
      alreadyCachedResults.add(Pair.of(segmentQueryInterval, cachedValue));
    } else if (populateCache) {
      // otherwise, if populating cache, add segment to list of segments to cache
      final SegmentId segmentId = segment.getServer().getSegment().getId();
      addCachePopulatorKey(segmentCacheKey, segmentId, segmentQueryInterval);
    }
  });
  return alreadyCachedResults;
}
// Runs the query against only the explicitly requested segment descriptors: builds a
// timeline restricted to the (interval, version, partition) chunks named in `specs`,
// then delegates to the outer CachingClusteredClient.
// NOTE(review): fragment — the enclosing anonymous QueryRunner starts before this chunk.
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final Map<String, Object> responseContext)
{
  return CachingClusteredClient.this.run(
      queryPlus,
      responseContext,
      timeline -> {
        final VersionedIntervalTimeline<String, ServerSelector> timeline2 =
            new VersionedIntervalTimeline<>(Ordering.natural());
        for (SegmentDescriptor spec : specs) {
          // Specs missing from the source timeline are silently skipped.
          final PartitionHolder<ServerSelector> entry = timeline.findEntry(spec.getInterval(), spec.getVersion());
          if (entry != null) {
            final PartitionChunk<ServerSelector> chunk = entry.getChunk(spec.getPartitionNumber());
            if (chunk != null) {
              timeline2.add(spec.getInterval(), spec.getVersion(), chunk);
            }
          }
        }
        return timeline2;
      }
  );
}
};
/**
 * Resolves a segment descriptor against the local timeline and returns a single runner
 * for the matching partition chunk. If the (interval, version) entry or the partition
 * chunk is absent, a runner that reports the segment as missing is returned instead.
 */
@Override
@SuppressWarnings("unchecked")
public Iterable<QueryRunner<T>> apply(SegmentDescriptor input)
{
  final PartitionHolder<ReferenceCountingSegment> holder =
      timeline.findEntry(input.getInterval(), input.getVersion());
  if (holder == null) {
    return Collections.singletonList(new ReportTimelineMissingSegmentQueryRunner<T>(input));
  }
  final PartitionChunk<ReferenceCountingSegment> partitionChunk = holder.getChunk(input.getPartitionNumber());
  if (partitionChunk == null) {
    return Collections.singletonList(new ReportTimelineMissingSegmentQueryRunner<T>(input));
  }
  final ReferenceCountingSegment segment = partitionChunk.getObject();
  return Collections.singletonList(
      buildAndDecorateQueryRunner(factory, toolChest, segment, input, cpuTimeAccumulator)
  );
}
}
// Creates one QueryRunner per partition chunk of the timeline entry matching the
// descriptor's interval and version, each pinned to that segment via SpecificSegmentSpec.
// NOTE(review): timeline.findEntry's result is NOT null-checked here (unlike sibling
// implementations in this file that return a missing-segment runner); if no entry
// matches, Iterables.transform(null, ...) throws NPE. Presumably callers guarantee the
// entry exists — confirm before relying on this path for arbitrary descriptors.
@Override
public Iterable<QueryRunner<T>> apply(final SegmentDescriptor descriptor)
{
  final PartitionHolder<Segment> holder = timeline.findEntry(
      descriptor.getInterval(),
      descriptor.getVersion()
  );
  return Iterables.transform(
      holder,
      new Function<PartitionChunk<Segment>, QueryRunner<T>>()
      {
        @Override
        public QueryRunner<T> apply(PartitionChunk<Segment> chunk)
        {
          return new SpecificSegmentQueryRunner<T>(
              factory.createRunner(chunk.getObject()),
              new SpecificSegmentSpec(descriptor)
          );
        }
      }
  );
}
}
// NOTE(review): fragment — these statements belong to a method that starts outside this chunk.
// Interval actually covered by the segment being optimized.
Interval segmentInterval = optimizationContext.getSegmentDescriptor().getInterval();
// Intervals named by the interval filter, plus an accumulator for those to exclude.
List<Interval> filterIntervals = intervalDimFilter.getIntervals();
List<Interval> excludedFilterIntervals = new ArrayList<>();
/**
 * Returns true when some entry in the server view holds a usable copy of the requested
 * segment: its interval contains the descriptor's interval, the partition number matches,
 * its version is at least as new, and at least one hosting server is replicatable.
 */
static boolean isSegmentLoaded(Iterable<ImmutableSegmentLoadInfo> serverView, SegmentDescriptor descriptor)
{
  for (ImmutableSegmentLoadInfo segmentLoadInfo : serverView) {
    // Guard clauses preserve the original check order.
    if (!segmentLoadInfo.getSegment().getInterval().contains(descriptor.getInterval())) {
      continue;
    }
    if (segmentLoadInfo.getSegment().getShardSpec().getPartitionNum() != descriptor.getPartitionNumber()) {
      continue;
    }
    if (segmentLoadInfo.getSegment().getVersion().compareTo(descriptor.getVersion()) < 0) {
      continue;
    }
    if (Iterables.any(segmentLoadInfo.getServers(), DruidServerMetadata::segmentReplicatable)) {
      return true;
    }
  }
  return false;
}
}
/**
 * Test helper: re-shapes raw per-segment timeseries results into the bySegment response
 * form a server would return for the given query's segment descriptors. Segment ids are
 * reconstructed as "&lt;intervalIndex&gt;_&lt;partitionNumber&gt;" dummy ids and matched
 * against {@code segmentIds}.
 *
 * NOTE(review): the raw {@code Result} below appears intentional — it lets a
 * BySegmentResultValueClass payload sit in a list typed to TimeseriesResultValue,
 * mirroring the bySegment wire format; a fully typed version would not compile.
 *
 * @throws ISE if a descriptor's reconstructed id has no matching entry in segmentIds
 */
private Sequence<Result<TimeseriesResultValue>> toFilteredQueryableTimeseriesResults(
    TimeseriesQuery query,
    List<SegmentId> segmentIds,
    List<Interval> queryIntervals,
    List<Iterable<Result<TimeseriesResultValue>>> results
)
{
  MultipleSpecificSegmentSpec spec = (MultipleSpecificSegmentSpec) query.getQuerySegmentSpec();
  List<Result<TimeseriesResultValue>> ret = new ArrayList<>();
  for (SegmentDescriptor descriptor : spec.getDescriptors()) {
    SegmentId id = SegmentId.dummy(
        StringUtils.format("%s_%s", queryIntervals.indexOf(descriptor.getInterval()), descriptor.getPartitionNumber())
    );
    int index = segmentIds.indexOf(id);
    if (index != -1) {
      Result result = new Result(
          results.get(index).iterator().next().getTimestamp(),
          new BySegmentResultValueClass(
              Lists.newArrayList(results.get(index)),
              id.toString(),
              descriptor.getInterval()
          )
      );
      ret.add(result);
    } else {
      throw new ISE("Descriptor %s not found in server", id);
    }
  }
  return Sequences.simple(ret);
}
// Coordinator endpoint polled to learn whether handoff of the segment identified by
// (interval, partitionNumber, version) has completed.
// NOTE(review): fragment — the format call these arguments belong to starts above this chunk.
"/druid/coordinator/v1/datasources/%s/handoffComplete?interval=%s&partitionNumber=%d&version=%s",
StringUtils.urlEncode(dataSource),
descriptor.getInterval(),
descriptor.getPartitionNumber(),
descriptor.getVersion()
// NOTE(review): fragment cut at both edges — the call closed by ");" and the expression
// consuming the BySegmentQueryRunner both continue outside this chunk.
descriptor.getInterval(),
descriptor.getVersion()
);
// Wraps per-segment results so responses can be attributed back to the sink segment,
// stamped with the start of the segment's interval.
new BySegmentQueryRunner<>(
    sinkSegmentId,
    descriptor.getInterval().getStart(),
    factory.mergeRunners(
        Execs.directExecutor(),
// NOTE(review): fragment — arguments to a call that starts above this chunk: the runner
// registered for the descriptor's interval, pinned to the specific segment.
runners.get(descriptor.getInterval()),
new SpecificSegmentSpec(descriptor)
);
// Maps a segment descriptor to the interval it covers.
// NOTE(review): fragment — the enclosing anonymous Function declaration starts before
// this chunk; the trailing brace closes it.
@Override
public Interval apply(SegmentDescriptor input)
{
  return input.getInterval();
}
}
/**
 * This spec covers exactly one segment, so the interval list is the descriptor's
 * single interval.
 */
@Override
public List<Interval> getIntervals()
{
  return Collections.singletonList(descriptor.getInterval());
}
/**
 * Builds a located descriptor from an existing {@link SegmentDescriptor}, carrying over
 * its interval, version and partition number.
 *
 * @param descriptor segment being located
 * @param size       segment size — presumably bytes; TODO confirm against the primary constructor
 * @param candidates server metadata for servers able to serve the segment
 */
public LocatedSegmentDescriptor(SegmentDescriptor descriptor, long size, List<DruidServerMetadata> candidates)
{
  this(descriptor.getInterval(), descriptor.getVersion(), descriptor.getPartitionNumber(), size, candidates);
}
/**
 * Computes the named cache key for one segment's results of a query. Key layout:
 * 8-byte start millis, 8-byte end millis, UTF-8 version string, 4-byte partition
 * number, then the query-level cache key bytes.
 */
public static Cache.NamedKey computeSegmentCacheKey(
    String segmentIdentifier,
    SegmentDescriptor descriptor,
    byte[] queryCacheKey
)
{
  final byte[] versionBytes = StringUtils.toUtf8(descriptor.getVersion());
  // 8 + 8 bytes for the interval endpoints plus 4 for the partition number.
  final int capacity = 16 + versionBytes.length + 4 + queryCacheKey.length;
  final Interval interval = descriptor.getInterval();
  final byte[] keyBytes = ByteBuffer
      .allocate(capacity)
      .putLong(interval.getStartMillis())
      .putLong(interval.getEndMillis())
      .put(versionBytes)
      .putInt(descriptor.getPartitionNumber())
      .put(queryCacheKey)
      .array();
  return new Cache.NamedKey(segmentIdentifier, keyBytes);
}
// Executes the query against only the requested segment descriptors by constructing a
// filtered VersionedIntervalTimeline containing just those chunks, then delegating to
// the outer CachingClusteredClient.
// NOTE(review): fragment — the enclosing anonymous QueryRunner starts before this chunk.
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final Map<String, Object> responseContext)
{
  return CachingClusteredClient.this.run(
      queryPlus,
      responseContext,
      timeline -> {
        final VersionedIntervalTimeline<String, ServerSelector> timeline2 =
            new VersionedIntervalTimeline<>(Ordering.natural());
        for (SegmentDescriptor spec : specs) {
          // Descriptors absent from the source timeline are skipped without error.
          final PartitionHolder<ServerSelector> entry = timeline.findEntry(spec.getInterval(), spec.getVersion());
          if (entry != null) {
            final PartitionChunk<ServerSelector> chunk = entry.getChunk(spec.getPartitionNumber());
            if (chunk != null) {
              timeline2.add(spec.getInterval(), spec.getVersion(), chunk);
            }
          }
        }
        return timeline2;
      }
  );
}
};
// Resolves the descriptor against the local timeline and returns a single decorated
// runner for the matching chunk; when the (interval, version) entry or the partition
// chunk is absent, returns a runner that reports the segment missing so the caller can
// react (e.g. broker retry).
@Override
@SuppressWarnings("unchecked")
public Iterable<QueryRunner<T>> apply(SegmentDescriptor input)
{
  final PartitionHolder<ReferenceCountingSegment> entry = timeline.findEntry(
      input.getInterval(),
      input.getVersion()
  );
  if (entry == null) {
    return Collections.singletonList(
        new ReportTimelineMissingSegmentQueryRunner<T>(input));
  }
  final PartitionChunk<ReferenceCountingSegment> chunk = entry.getChunk(input.getPartitionNumber());
  if (chunk == null) {
    return Collections.singletonList(new ReportTimelineMissingSegmentQueryRunner<T>(input));
  }
  final ReferenceCountingSegment adapter = chunk.getObject();
  return Collections.singletonList(
      buildAndDecorateQueryRunner(factory, toolChest, adapter, input, cpuTimeAccumulator)
  );
}
}