private static ShardSpec getNextPartitionShardSpec(ShardSpec shardSpec) { if (shardSpec instanceof LinearShardSpec) { return new LinearShardSpec(shardSpec.getPartitionNum() + 1); } else if (shardSpec instanceof NumberedShardSpec) { return new NumberedShardSpec(shardSpec.getPartitionNum(), ((NumberedShardSpec) shardSpec).getPartitions()); } else { // Druid only support appending more partitions to Linear and Numbered ShardSpecs. throw new IllegalStateException(String.format("Cannot expand shard spec [%s]", shardSpec)); } }
/**
 * Convenience overload for tests: creates a v1 segment at {@code location} covering
 * the fixed interval [100, 170) in UTC, with a single linear partition (number 0).
 *
 * @param location path where the segment data lives
 * @return the constructed test segment
 * @throws IOException if segment creation fails
 */
private DataSegment createSegment(String location) throws IOException {
  final Interval defaultInterval = new Interval(100, 170, DateTimeZone.UTC);
  final LinearShardSpec firstPartition = new LinearShardSpec(0);
  return createSegment(location, defaultInterval, "v1", firstPartition);
}
interval, tuningConfig.getVersioningPolicy().getVersion(interval), new LinearShardSpec(0)); return currentOpenSegment; } else if (currentOpenSegment.getInterval().equals(interval)) { interval, tuningConfig.getVersioningPolicy().getVersion(interval), new LinearShardSpec(currentOpenSegment.getShardSpec().getPartitionNum() + 1)); pushSegments(Lists.newArrayList(currentOpenSegment)); LOG.info("Creating new partition for segment {}, partition num {}", interval, tuningConfig.getVersioningPolicy().getVersion(interval), new LinearShardSpec(0)); pushSegments(Lists.newArrayList(currentOpenSegment)); LOG.info("Creating segment {}", retVal.getIdentifierAsString());
interval, tuningConfig.getVersioningPolicy().getVersion(interval), new LinearShardSpec(partitionNumber)); interval, tuningConfig.getVersioningPolicy().getVersion(interval), new LinearShardSpec(partitionNumber));
new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)), createSegment(new Path(taskDirPath, "index_old_2.zip").toString(), new Interval(150, 200, DateTimeZone.UTC), "v0", new LinearShardSpec(0)), createSegment(new Path(taskDirPath, "index_old_3.zip").toString(), new Interval(200, 300, DateTimeZone.UTC), "v0", new LinearShardSpec(0))); HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig(); pusherConfig.setStorageDirectory(taskDirPath.toString()); new Interval(100, 300, DateTimeZone.UTC), "v1", new LinearShardSpec(0)); Path descriptorPath =
new Interval(200, 250, DateTimeZone.UTC), "v0", new LinearShardSpec(0)), createSegment(new Path(taskDirPath, "index_old_3.zip").toString(), new Interval(250, 300, DateTimeZone.UTC), "v0", new LinearShardSpec(0))); HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig(); pusherConfig.setStorageDirectory(taskDirPath.toString()); new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0)); Path descriptorPath =
new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(1))); HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig(); pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY))); new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0)); Path descriptorPath =
new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0))); DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig, new Interval(180, 250, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(1))); HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig(); pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY))); new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0)); Path descriptorPath = new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(1)); Path segmentPath =
new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0))); DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig, new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)); Path descriptorPath =
new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0))); DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig, new Interval(180, 250, DateTimeZone.UTC), "v1", new LinearShardSpec(0)); Path descriptorPath1 = new Interval(200, 250, DateTimeZone.UTC), "v1", new LinearShardSpec(0)); Path descriptorPath2 = new Interval(100, 200, DateTimeZone.UTC), "v1", new LinearShardSpec(0)); Path descriptorPath3 =
max.getInterval(), max.getVersion(), new LinearShardSpec(max.getShardSpec().getPartitionNum() + 1) ); } else if (max.getShardSpec() instanceof NumberedShardSpec) {