/**
 * Convenience constructor: copies the segment coordinates (interval, version, partition number)
 * out of an existing {@link SegmentDescriptor} and pairs them with the segment's size and the
 * servers that can serve it.
 *
 * @param descriptor segment coordinates to copy
 * @param size       segment size — presumably in bytes; TODO confirm unit at call sites
 * @param candidates metadata of the servers holding this segment
 */
public LocatedSegmentDescriptor(SegmentDescriptor descriptor, long size, List<DruidServerMetadata> candidates)
{
  this(descriptor.getInterval(), descriptor.getVersion(), descriptor.getPartitionNumber(), size, candidates);
}
/**
 * Builds the per-segment cache key under which query results for one segment are cached.
 *
 * <p>Key layout: interval start millis (8 bytes) | interval end millis (8 bytes) |
 * UTF-8 version string | partition number (4 bytes) | query cache key.
 *
 * @param segmentId     identifier used as the key's namespace
 * @param descriptor    segment coordinates (interval, version, partition number)
 * @param queryCacheKey query-level cache key bytes to append
 * @return a namespaced cache key unique to (segment, query)
 */
public static Cache.NamedKey computeSegmentCacheKey(
    String segmentId,
    SegmentDescriptor descriptor,
    byte[] queryCacheKey
)
{
  final Interval segmentQueryInterval = descriptor.getInterval();
  final byte[] versionBytes = StringUtils.toUtf8(descriptor.getVersion());
  // Use named primitive widths instead of the magic numbers 16 and 4: two longs for the
  // interval bounds plus one int for the partition number. Same sizes, self-documenting.
  final byte[] key = ByteBuffer
      .allocate(2 * Long.BYTES + versionBytes.length + Integer.BYTES + queryCacheKey.length)
      .putLong(segmentQueryInterval.getStartMillis())
      .putLong(segmentQueryInterval.getEndMillis())
      .put(versionBytes)
      .putInt(descriptor.getPartitionNumber())
      .put(queryCacheKey)
      .array();
  return new Cache.NamedKey(segmentId, key);
}
@Override
public QueryRunner<T> apply(SegmentDescriptor spec)
{
  // Look up the FireChief responsible for this partition. If none is running here,
  // answer with a runner that yields no results rather than failing.
  final FireChief chief = partitionChiefs.get(spec.getPartitionNumber());
  if (chief == null) {
    return new NoopQueryRunner<T>();
  }
  // Narrow the query down to exactly this segment before handing it to the chief.
  return chief.getQueryRunner(query.withQuerySegmentSpec(new SpecificSegmentSpec(spec)));
}
}
@Override
@SuppressWarnings("unchecked")
public Iterable<QueryRunner<T>> apply(SegmentDescriptor input)
{
  // Resolve the descriptor against the local timeline; a miss at either level means
  // the segment is not served by this process, which we report instead of erroring.
  final PartitionHolder<ReferenceCountingSegment> holder =
      timeline.findEntry(input.getInterval(), input.getVersion());
  final PartitionChunk<ReferenceCountingSegment> partitionChunk =
      holder == null ? null : holder.getChunk(input.getPartitionNumber());
  if (partitionChunk == null) {
    return Collections.singletonList(new ReportTimelineMissingSegmentQueryRunner<T>(input));
  }
  final ReferenceCountingSegment segment = partitionChunk.getObject();
  return Collections.singletonList(
      buildAndDecorateQueryRunner(factory, toolChest, segment, input, cpuTimeAccumulator)
  );
}
}
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final Map<String, Object> responseContext)
{
  // Delegate to the enclosing client, but first prune the server-view timeline down to
  // exactly the segments named in "specs"; anything not found is silently skipped.
  return CachingClusteredClient.this.run(
      queryPlus,
      responseContext,
      timeline -> {
        final VersionedIntervalTimeline<String, ServerSelector> filtered =
            new VersionedIntervalTimeline<>(Ordering.natural());
        for (SegmentDescriptor spec : specs) {
          final PartitionHolder<ServerSelector> holder =
              timeline.findEntry(spec.getInterval(), spec.getVersion());
          if (holder == null) {
            continue; // interval/version no longer present in the view
          }
          final PartitionChunk<ServerSelector> chunk = holder.getChunk(spec.getPartitionNumber());
          if (chunk == null) {
            continue; // partition missing from the holder
          }
          filtered.add(spec.getInterval(), spec.getVersion(), chunk);
        }
        return filtered;
      }
  );
}
};
/**
 * Returns true when some announced segment in the server view covers the descriptor's
 * interval, matches its partition, is at least as new as its version, and is held by
 * at least one server whose segments are replicatable.
 */
static boolean isSegmentLoaded(Iterable<ImmutableSegmentLoadInfo> serverView, SegmentDescriptor descriptor)
{
  for (ImmutableSegmentLoadInfo loadInfo : serverView) {
    if (!loadInfo.getSegment().getInterval().contains(descriptor.getInterval())) {
      continue;
    }
    if (loadInfo.getSegment().getShardSpec().getPartitionNum() != descriptor.getPartitionNumber()) {
      continue;
    }
    // A strictly newer version also counts as loaded, hence >= rather than ==.
    if (loadInfo.getSegment().getVersion().compareTo(descriptor.getVersion()) < 0) {
      continue;
    }
    if (Iterables.any(loadInfo.getServers(), DruidServerMetadata::segmentReplicatable)) {
      return true;
    }
  }
  return false;
}
}
StringUtils.urlEncode(dataSource), descriptor.getInterval(), descriptor.getPartitionNumber(), descriptor.getVersion()
/**
 * Re-associates each requested segment descriptor with the result iterable captured for it,
 * wrapping the rows in a by-segment envelope keyed by a dummy SegmentId built from the
 * descriptor's interval index and partition number.
 *
 * @throws ISE if a descriptor has no matching entry in {@code segmentIds}
 */
private Sequence<Result<TimeseriesResultValue>> toFilteredQueryableTimeseriesResults(
    TimeseriesQuery query,
    List<SegmentId> segmentIds,
    List<Interval> queryIntervals,
    List<Iterable<Result<TimeseriesResultValue>>> results
)
{
  MultipleSpecificSegmentSpec segmentSpec = (MultipleSpecificSegmentSpec) query.getQuerySegmentSpec();
  List<Result<TimeseriesResultValue>> filtered = new ArrayList<>();
  for (SegmentDescriptor descriptor : segmentSpec.getDescriptors()) {
    // The dummy id encodes "<interval index>_<partition>", mirroring how segmentIds was built.
    SegmentId dummyId = SegmentId.dummy(
        StringUtils.format("%s_%s", queryIntervals.indexOf(descriptor.getInterval()), descriptor.getPartitionNumber())
    );
    int index = segmentIds.indexOf(dummyId);
    if (index == -1) {
      throw new ISE("Descriptor %s not found in server", dummyId);
    }
    // Timestamp of the first row stands in for the segment's result timestamp.
    Result result = new Result(
        results.get(index).iterator().next().getTimestamp(),
        new BySegmentResultValueClass(
            Lists.newArrayList(results.get(index)),
            dummyId.toString(),
            descriptor.getInterval()
        )
    );
    filtered.add(result);
  }
  return Sequences.simple(filtered);
}
final PartitionChunk<Sink> chunk = holder.getChunk(descriptor.getPartitionNumber()); if (chunk == null) { return new ReportTimelineMissingSegmentQueryRunner<>(descriptor);
/**
 * Convenience constructor: extracts interval, version, and partition number from an existing
 * {@link SegmentDescriptor} and attaches the segment's size and candidate servers.
 *
 * @param descriptor segment coordinates to copy
 * @param size       segment size — presumably in bytes; TODO confirm unit at call sites
 * @param candidates metadata of the servers holding this segment
 */
public LocatedSegmentDescriptor(SegmentDescriptor descriptor, long size, List<DruidServerMetadata> candidates)
{
  this(descriptor.getInterval(), descriptor.getVersion(), descriptor.getPartitionNumber(), size, candidates);
}
@Override
public QueryRunner<T> apply(SegmentDescriptor spec)
{
  // No FireChief for this partition means no local data: return an empty runner.
  final FireChief partitionChief = partitionChiefs.get(spec.getPartitionNumber());
  if (partitionChief != null) {
    // Restrict the query to this one segment before delegating.
    return partitionChief.getQueryRunner(query.withQuerySegmentSpec(new SpecificSegmentSpec(spec)));
  }
  return new NoopQueryRunner<T>();
}
}
/**
 * Builds the per-segment cache key: interval start millis (8 bytes) | interval end millis
 * (8 bytes) | UTF-8 version | partition number (4 bytes) | query cache key, namespaced by
 * the segment identifier.
 */
public static Cache.NamedKey computeSegmentCacheKey(
    String segmentIdentifier,
    SegmentDescriptor descriptor,
    byte[] queryCacheKey
)
{
  final Interval interval = descriptor.getInterval();
  final byte[] versionBytes = StringUtils.toUtf8(descriptor.getVersion());
  // 16 = two longs for the interval bounds; 4 = one int for the partition number.
  final ByteBuffer buffer = ByteBuffer.allocate(16 + versionBytes.length + 4 + queryCacheKey.length);
  buffer.putLong(interval.getStartMillis());
  buffer.putLong(interval.getEndMillis());
  buffer.put(versionBytes);
  buffer.putInt(descriptor.getPartitionNumber());
  buffer.put(queryCacheKey);
  return new Cache.NamedKey(segmentIdentifier, buffer.array());
}
@Override
@SuppressWarnings("unchecked")
public Iterable<QueryRunner<T>> apply(SegmentDescriptor input)
{
  // Two-step lookup: first the (interval, version) entry, then the partition chunk.
  // Either miss means the segment is not served here, which we report upstream.
  final PartitionHolder<ReferenceCountingSegment> holder =
      timeline.findEntry(input.getInterval(), input.getVersion());
  if (holder == null) {
    return Collections.singletonList(new ReportTimelineMissingSegmentQueryRunner<T>(input));
  }
  final PartitionChunk<ReferenceCountingSegment> partitionChunk = holder.getChunk(input.getPartitionNumber());
  if (partitionChunk == null) {
    return Collections.singletonList(new ReportTimelineMissingSegmentQueryRunner<T>(input));
  }
  return Collections.singletonList(
      buildAndDecorateQueryRunner(factory, toolChest, partitionChunk.getObject(), input, cpuTimeAccumulator)
  );
}
}
@Override
public Sequence<T> run(final QueryPlus<T> queryPlus, final Map<String, Object> responseContext)
{
  // Delegates to the enclosing CachingClusteredClient, supplying a timeline converter that
  // narrows the full server-view timeline to only the segments listed in "specs".
  return CachingClusteredClient.this.run(
      queryPlus,
      responseContext,
      timeline -> {
        final VersionedIntervalTimeline<String, ServerSelector> timeline2 =
            new VersionedIntervalTimeline<>(Ordering.natural());
        for (SegmentDescriptor spec : specs) {
          // findEntry may return null when the (interval, version) pair has left the view;
          // such specs are silently dropped from the filtered timeline.
          final PartitionHolder<ServerSelector> entry = timeline.findEntry(spec.getInterval(), spec.getVersion());
          if (entry != null) {
            // Likewise, a missing partition chunk is skipped rather than reported here.
            final PartitionChunk<ServerSelector> chunk = entry.getChunk(spec.getPartitionNumber());
            if (chunk != null) {
              timeline2.add(spec.getInterval(), spec.getVersion(), chunk);
            }
          }
        }
        return timeline2;
      }
  );
}
};
/**
 * Returns true when hand-off of the given descriptor is complete: some announced segment
 * covers the descriptor's interval, matches its partition number, carries a version at least
 * as new as the descriptor's, and is served by at least one server whose segments are
 * replicatable.
 *
 * @param serverView announced segments with their serving servers
 * @param descriptor segment coordinates to look for
 */
static boolean isHandOffComplete(List<ImmutableSegmentLoadInfo> serverView, SegmentDescriptor descriptor)
{
  for (ImmutableSegmentLoadInfo segmentLoadInfo : serverView) {
    if (segmentLoadInfo.getSegment().getInterval().contains(descriptor.getInterval())
        && segmentLoadInfo.getSegment().getShardSpec().getPartitionNum() == descriptor.getPartitionNumber()
        // A strictly newer version also satisfies hand-off, hence >= rather than ==.
        && segmentLoadInfo.getSegment().getVersion().compareTo(descriptor.getVersion()) >= 0
        // Method reference instead of the verbose anonymous Predicate, consistent with
        // the equivalent check in isSegmentLoaded elsewhere in the codebase.
        && Iterables.any(segmentLoadInfo.getServers(), DruidServerMetadata::segmentReplicatable)) {
      return true;
    }
  }
  return false;
}
final PartitionChunk<Sink> chunk = holder.getChunk(descriptor.getPartitionNumber()); if (chunk == null) { return new ReportTimelineMissingSegmentQueryRunner<>(descriptor);