private static ShardSpec getNextPartitionShardSpec(ShardSpec shardSpec)
{
  if (shardSpec instanceof LinearShardSpec) {
    return new LinearShardSpec(shardSpec.getPartitionNum() + 1);
  } else if (shardSpec instanceof NumberedShardSpec) {
    return new NumberedShardSpec(
        shardSpec.getPartitionNum() + 1,
        ((NumberedShardSpec) shardSpec).getPartitions()
    );
  } else {
    // Druid only supports appending more partitions to Linear and Numbered ShardSpecs.
    throw new IllegalStateException(String.format("Cannot expand shard spec [%s]", shardSpec));
  }
}
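// A minimal usage sketch of the helper above (not taken from the surrounding source; the
// variable names and the partition number are hypothetical): deriving the shard spec for the
// next appended partition from an existing LinearShardSpec.
ShardSpec current = new LinearShardSpec(3);
ShardSpec next = getNextPartitionShardSpec(current);
// next is a LinearShardSpec whose partitionNum is current.getPartitionNum() + 1, i.e. 4.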
if (max == null ||
    max.getShardSpec().getPartitionNum() < existing.getObject().getShardSpec().getPartitionNum()) {
  max = SegmentIdentifier.fromDataSegment(existing.getObject());
}
    interval,
    tuningConfig.getVersioningPolicy().getVersion(interval),
    new LinearShardSpec(currentOpenSegment.getShardSpec().getPartitionNum() + 1)
);
pushSegments(Lists.newArrayList(currentOpenSegment));
LOG.info(
    "Creating new partition for segment {}, partition num {}",
    retVal.getIdentifierAsString(),
    retVal.getShardSpec().getPartitionNum()
);
currentOpenSegment = retVal;
return retVal;
Assert.assertEquals(2, persistedSegment.getShardSpec().getPartitionNum());
if (currentOpenSegment.getShardSpec().getPartitionNum() != partitionNumber ||
    !currentOpenSegment.getInterval().equals(interval)) {
  pushSegments(ImmutableList.of(currentOpenSegment));
Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec);
Assert.assertEquals(2, persistedSegment.getShardSpec().getPartitionNum());
Path expectedFinalHadoopPath =
Assert.assertEquals("v0", persistedSegment.getVersion()); Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec); Assert.assertEquals(1, persistedSegment.getShardSpec().getPartitionNum());
private static File makeBasePersistSubdirectory(
    final File basePersistDirectory,
    final String dataSource,
    final ShardSpec shardSpec
)
{
  final File dataSourceDirectory = new File(basePersistDirectory, dataSource);
  return new File(dataSourceDirectory, String.valueOf(shardSpec.getPartitionNum()));
}
}
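// Illustrative sketch only (the base directory, data source, and partition number are
// hypothetical): the persist layout produced above is <basePersistDirectory>/<dataSource>/<partitionNum>.
File persistDir = makeBasePersistSubdirectory(new File("/tmp/persist"), "wikipedia", new LinearShardSpec(2));
// persistDir points at /tmp/persist/wikipedia/2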
public static String getSequenceName(Interval interval, String version, ShardSpec shardSpec)
{
  return StringUtils.format("index_%s_%s_%d", interval, version, shardSpec.getPartitionNum());
}
}
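// Hedged example of the sequence name format above; the interval, version, and partition number
// are hypothetical. The result is "index_<interval>_<version>_<partitionNum>", e.g. something like
// "index_2019-01-01T00:00:00.000Z/2019-01-02T00:00:00.000Z_v1_0".
String sequenceName = getSequenceName(
    Intervals.of("2019-01-01/2019-01-02"),
    "v1",
    new LinearShardSpec(0)
);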
@LifecycleStart
public void start() throws IOException
{
  serverAnnouncer.announce();
  fireChiefExecutor = Execs.multiThreaded(fireDepartments.size(), "chief-%d");
  for (final FireDepartment fireDepartment : fireDepartments) {
    final DataSchema schema = fireDepartment.getDataSchema();
    final FireChief chief = new FireChief(fireDepartment, conglomerate);
    chiefs.computeIfAbsent(schema.getDataSource(), k -> new HashMap<>())
          .put(fireDepartment.getTuningConfig().getShardSpec().getPartitionNum(), chief);
    fireChiefExecutor.submit(chief);
  }
}
@JsonCreator
public SubTask(
    @JsonProperty("groupId") String groupId,
    @JsonProperty("segment") DataSegment segment,
    @JsonProperty("indexSpec") IndexSpec indexSpec,
    @JsonProperty("force") Boolean force,
    @JsonProperty("validate") Boolean validate,
    @JsonProperty("segmentWriteOutMediumFactory") @Nullable SegmentWriteOutMediumFactory segmentWriteOutMediumFactory,
    @JsonProperty("context") Map<String, Object> context
)
{
  super(
      joinId(
          groupId,
          "sub",
          segment.getInterval().getStart(),
          segment.getInterval().getEnd(),
          segment.getShardSpec().getPartitionNum()
      ),
      groupId,
      segment.getDataSource(),
      segment.getInterval(),
      context
  );
  this.segment = segment;
  this.indexSpec = indexSpec == null ? new IndexSpec() : indexSpec;
  this.force = force == null ? false : force;
  this.validate = validate == null ? true : validate;
  this.segmentWriteOutMediumFactory = segmentWriteOutMediumFactory;
}
/**
 * Get the proper bucket for some input row.
 *
 * @param inputRow an InputRow
 *
 * @return the Bucket that this row belongs to
 */
public Optional<Bucket> getBucket(InputRow inputRow)
{
  final Optional<Interval> timeBucket = schema.getDataSchema().getGranularitySpec().bucketInterval(
      DateTimes.utc(inputRow.getTimestampFromEpoch())
  );
  if (!timeBucket.isPresent()) {
    return Optional.absent();
  }
  final DateTime bucketStart = timeBucket.get().getStart();
  final ShardSpec actualSpec = shardSpecLookups.get(bucketStart.getMillis())
                                               .getShardSpec(
                                                   rollupGran.bucketStart(inputRow.getTimestamp()).getMillis(),
                                                   inputRow
                                               );
  final HadoopyShardSpec hadoopyShardSpec = hadoopShardSpecLookup.get(bucketStart.getMillis()).get(actualSpec);

  return Optional.of(
      new Bucket(
          hadoopyShardSpec.getShardNum(),
          bucketStart,
          actualSpec.getPartitionNum()
      )
  );
}
hadoopyShardSpec.getShardNum(),
timeBucket.get().getStart(),
actualSpec.getPartitionNum()
private static String makeTaskId(FireDepartment fireDepartment)
{
  return makeTaskId(
      fireDepartment.getDataSchema().getDataSource(),
      fireDepartment.getTuningConfig().getShardSpec().getPartitionNum(),
      DateTimes.nowUtc(),
      random.nextInt()
  );
}
@Override
public String apply(DataSegment x)
{
  return String.format(
      "%s_%s_%s_%s",
      x.getInterval().getStart(),
      x.getInterval().getEnd(),
      x.getVersion(),
      x.getShardSpec().getPartitionNum()
  );
}
}
@JsonCreator
public SubTask(
    @JsonProperty("groupId") String groupId,
    @JsonProperty("segment") DataSegment segment
)
{
  super(
      joinId(
          groupId,
          "sub",
          segment.getInterval().getStart(),
          segment.getInterval().getEnd(),
          segment.getShardSpec().getPartitionNum()
      ),
      groupId,
      segment.getDataSource(),
      segment.getInterval()
  );
  this.segment = segment;
}
@Override
public String apply(DataSegment x)
{
  return StringUtils.format(
      "%s_%s_%s_%s",
      x.getInterval().getStart(),
      x.getInterval().getEnd(),
      x.getVersion(),
      x.getShardSpec().getPartitionNum()
  );
}
}
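// Hedged sketch of the key built by apply() above; the segment values here are hypothetical.
// The key has the form "<intervalStart>_<intervalEnd>_<version>_<partitionNum>", for example:
String exampleKey = StringUtils.format(
    "%s_%s_%s_%s",
    DateTimes.of("2019-01-01"),
    DateTimes.of("2019-01-02"),
    "v1",
    0
);
// exampleKey: "2019-01-01T00:00:00.000Z_2019-01-02T00:00:00.000Z_v1_0"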
static boolean isHandOffComplete(List<ImmutableSegmentLoadInfo> serverView, SegmentDescriptor descriptor)
{
  for (ImmutableSegmentLoadInfo segmentLoadInfo : serverView) {
    if (segmentLoadInfo.getSegment().getInterval().contains(descriptor.getInterval())
        && segmentLoadInfo.getSegment().getShardSpec().getPartitionNum() == descriptor.getPartitionNumber()
        && segmentLoadInfo.getSegment().getVersion().compareTo(descriptor.getVersion()) >= 0
        && Iterables.any(
            segmentLoadInfo.getServers(),
            new Predicate<DruidServerMetadata>()
            {
              @Override
              public boolean apply(DruidServerMetadata input)
              {
                return input.segmentReplicatable();
              }
            }
        )) {
      return true;
    }
  }
  return false;
}
holder.getInterval(),
theSink.getSegment().getVersion(),
theSink.getSegment().getShardSpec().getPartitionNum()
);
if ((entry != null) && (entry.getChunk(segment.getShardSpec().getPartitionNum()) != null)) {
  log.warn("Told to load an adapter for a segment[%s] that already exists", segment.getIdentifier());
  resultSupplier.set(false);