/**
 * @param pushedSegment         the pushed data segment object
 * @param segmentsDescriptorDir actual directory path for descriptors
 *
 * @return a sanitized descriptor file path; colons are stripped from the segment
 *         identifier since they are not legal in HDFS file names
 */
public static Path makeSegmentDescriptorOutputPath(DataSegment pushedSegment, Path segmentsDescriptorDir) {
  return new Path(segmentsDescriptorDir,
      String.format("%s.json", pushedSegment.getIdentifier().replace(":", "")));
}
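// A minimal usage sketch (values are illustrative, not from the original tests). The
// identifier of a DataSegment embeds ISO-8601 timestamps, so it contains ':' characters;
// the descriptor file name is that identifier with the colons removed, plus ".json".
DataSegment segment = DataSegment.builder()
    .dataSource("ds")
    .interval(new Interval(100, 150, DateTimeZone.UTC))
    .version("v0")
    .shardSpec(new LinearShardSpec(0))
    .build();
Path descriptor = makeSegmentDescriptorOutputPath(segment, new Path("/tmp/descriptors"));
// descriptor is "/tmp/descriptors/<identifier-without-colons>.json"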
private DataSegment createSegment(String location, Interval interval, String version, ShardSpec shardSpec)
    throws IOException {
  // Write placeholder data so the segment has a real file on disk to push around.
  FileUtils.writeStringToFile(new File(location), "dummySegmentData");
  return DataSegment.builder()
      .dataSource(DATA_SOURCE_NAME)
      .version(version)
      .interval(interval)
      .shardSpec(shardSpec)
      .loadSpec(ImmutableMap.of("path", location))
      .build();
}
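// Typical call from the tests below (a sketch; "taskDirPath" stands in for whatever
// working directory the test class sets up, and is an assumption of this example):
DataSegment dataSegment = createSegment(
    new Path(taskDirPath, DruidStorageHandlerUtils.INDEX_ZIP).toString(),
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0));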
static Path getPath(DataSegment dataSegment) {
  return new Path(String.valueOf(Objects.requireNonNull(dataSegment.getLoadSpec()).get("path")));
}
  timeline = new VersionedIntervalTimeline<>(Ordering.natural());
} else {
  // Append mode: look up what already exists in the segment's interval.
  List<TimelineObjectHolder<String, DataSegment>> existingChunks = timeline.lookup(segment.getInterval());
  if (existingChunks.size() > 1) {
    // More than one existing chunk overlaps the interval, so there is no single
    // shard sequence to extend.
    throw new IllegalStateException(String.format(
        "Cannot allocate new segment for dataSource [%s], interval [%s]: already have [%,d] chunks."
            + " Not possible to append new segment.",
        dataSource, segment.getInterval(), existingChunks.size()));
  }
  // Find the existing shard with the highest partition number, if any.
  SegmentIdentifier max = null;
  if (!existingChunks.isEmpty()) {
    TimelineObjectHolder<String, DataSegment> existingHolder = Iterables.getOnlyElement(existingChunks);
    for (PartitionChunk<DataSegment> existing : existingHolder.getObject()) {
      if (max == null || max.getShardSpec().getPartitionNum() < existing.getObject()
          .getShardSpec()
          .getPartitionNum()) {
        max = SegmentIdentifier.fromDataSegment(existing.getObject());
      }
    }
  }
  final ShardSpec newShardSpec;
  final String newVersion;
  if (max == null) {
    // Nothing in the interval yet: publish under the segment's own version and shard spec.
    newShardSpec = segment.getShardSpec();
    newVersion = segment.getVersion();
  } else {
    // Append after the highest existing partition, keeping the existing version.
    newShardSpec = getNextPartitionShardSpec(max.getShardSpec());
    newVersion = max.getVersion();
  }
  DataSegment publishedSegment = publishSegmentWithShardSpec(segment, newShardSpec, newVersion,
      getPath(segment).getFileSystem(conf), // conf: Hadoop Configuration from the enclosing method
      dataSegmentPusher);
  finalSegmentsToPublish.add(publishedSegment);
  timeline.add(publishedSegment.getInterval(), publishedSegment.getVersion(),
      publishedSegment.getShardSpec().createChunk(publishedSegment));
  // Each published segment is then queued for insertion into the metadata store:
  batch.add(new ImmutableMap.Builder<String, Object>()
      .put("id", segment.getIdentifier())
      .put("dataSource", segment.getDataSource())
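// How the tests below drive this method (a sketch: the parameter list is inferred from
// the call sites in this excerpt and from the variables they use, so treat the exact
// signature as an assumption):
List<DataSegment> published = DruidStorageHandlerUtils.publishSegmentsAndCommit(
    connector,                    // SQLMetadataConnector to the Druid metadata store
    metadataStorageTablesConfig,  // table names in the metadata store
    DATA_SOURCE_NAME,             // target dataSource
    ImmutableList.of(dataSegment),
    false,                        // overwrite = false -> take the append path above
    config,                       // Hadoop Configuration used to resolve the FileSystem
    dataSegmentPusher);           // HdfsDataSegmentPusher that moves segment files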
// Existing segment in interval [100, 150): version "v0", partition 1 (leading
// arguments elided in this excerpt).
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(1)));
HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY)));
// Segment to append: same interval, newer version "v1", partition 0.
    new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
Path descriptorPath = ...; // right-hand side elided in this excerpt
// Appending keeps the existing version "v0" and bumps to the next partition number (1 + 1 = 2).
Assert.assertEquals("v0", persistedSegment.getVersion());
Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec);
Assert.assertEquals(2, persistedSegment.getShardSpec().getPartitionNum());
Assert.assertEquals(ImmutableMap.of("type", "hdfs", "path", expectedFinalHadoopPath.toString()),
    persistedSegment.getLoadSpec());
Assert.assertEquals("dummySegmentData",
    FileUtils.readFileToString(new File(expectedFinalHadoopPath.toUri())));
private static VersionedIntervalTimeline<String, DataSegment> getTimelineForIntervalWithHandle(
    final Handle handle, final String dataSource, final Interval interval,
    final MetadataStorageTablesConfig dbTables) throws IOException {
  // Select every used segment of the dataSource whose interval overlaps the given one:
  // a segment overlaps iff it starts before our end and ends after our start.
  Query<Map<String, Object>> sql = handle.createQuery(String.format(
      "SELECT payload FROM %s WHERE used = true AND dataSource = ? AND start <= ? AND \"end\" >= ?",
      dbTables.getSegmentsTable()))
      .bind(0, dataSource)
      .bind(1, interval.getEnd().toString())
      .bind(2, interval.getStart().toString());
  final VersionedIntervalTimeline<String, DataSegment> timeline =
      new VersionedIntervalTimeline<>(Ordering.natural());
  try (ResultIterator<byte[]> dbSegments = sql.map(ByteArrayMapper.FIRST).iterator()) {
    while (dbSegments.hasNext()) {
      final byte[] payload = dbSegments.next();
      DataSegment segment = JSON_MAPPER.readValue(payload, DataSegment.class);
      timeline.add(segment.getInterval(), segment.getVersion(), segment.getShardSpec().createChunk(segment));
    }
  }
  return timeline;
}
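// Sketch of how such a helper can be driven standalone (the real call sites run inside
// a metadata-store transaction and pass their own Handle; "connector" and
// "metadataStorageTablesConfig" are assumed to be set up as in the tests below):
VersionedIntervalTimeline<String, DataSegment> timeline = connector.getDBI().withHandle(
    handle -> getTimelineForIntervalWithHandle(handle, DATA_SOURCE_NAME,
        new Interval(100, 150, DateTimeZone.UTC), metadataStorageTablesConfig));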
      dataSegment.getDataSource(), dataSegment.getIdentifier()));
} catch (MalformedURLException e) {
  throw Throwables.propagate(e);
}
// Segment covering [100, 150): version "v0", partition 0 (leading arguments elided
// in this excerpt).
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig,
// A second segment in the disjoint interval [180, 250): version "v1", partition 0.
    new Interval(180, 250, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
DataSegment persistedSegment = Iterables.getOnlyElement(dataSegmentList);
Assert.assertEquals(dataSegment, persistedSegment);
Assert.assertEquals(dataSegment.getVersion(), persistedSegment.getVersion());
Path expectedFinalHadoopPath = new Path(dataSegmentPusher.getPathForHadoop(),
    dataSegmentPusher.makeIndexPathName(persistedSegment, DruidStorageHandlerUtils.INDEX_ZIP));
Assert.assertEquals(ImmutableMap.of("type", "hdfs", "path", expectedFinalHadoopPath.toString()),
    persistedSegment.getLoadSpec());
Assert.assertEquals("dummySegmentData",
    FileUtils.readFileToString(new File(expectedFinalHadoopPath.toUri())));
    DataSegmentPusher dataSegmentPusher) throws IOException {
  boolean retry = true;
  DataSegment.Builder dataSegmentBuilder = new DataSegment.Builder(segment).version(version);
  Path finalPath = null;
  while (retry) {
    retry = false;
    dataSegmentBuilder.shardSpec(shardSpec);
    final Path intermediatePath = getPath(segment);
    // Compute the destination for the current (possibly bumped) shard spec.
    finalPath = new Path(dataSegmentPusher.getPathForHadoop(),
        dataSegmentPusher.makeIndexPathName(dataSegmentBuilder.build(), DruidStorageHandlerUtils.INDEX_ZIP));
    // The rename of intermediatePath to finalPath, and the retry with the next
    // partition number when another writer already owns finalPath, are elided here.
  }
  DataSegment dataSegment = dataSegmentBuilder.loadSpec(dataSegmentPusher.makeLoadSpec(finalPath.toUri())).build();
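// The retry path above depends on a helper that bumps the partition number. A minimal
// sketch, assuming only linear and numbered shard specs need handling (the actual
// helper may cover more cases):
private static ShardSpec getNextPartitionShardSpec(ShardSpec shardSpec) {
  if (shardSpec instanceof LinearShardSpec) {
    return new LinearShardSpec(shardSpec.getPartitionNum() + 1);
  } else if (shardSpec instanceof NumberedShardSpec) {
    NumberedShardSpec numbered = (NumberedShardSpec) shardSpec;
    return new NumberedShardSpec(numbered.getPartitionNum() + 1, numbered.getPartitions());
  }
  throw new IllegalStateException(String.format("Cannot compute next partition for shardSpec [%s]", shardSpec));
}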
@Test
public void testSerDesr() throws IOException {
  String segment = "{\"dataSource\":\"datasource2015\",\"interval\":\"2015-06-01T00:00:00.000-04:00/"
      + "2015-06-02T00:00:00.000-04:00\""
      + ",\"version\":\"2016-11-04T19:24:01.732-04:00\",\"loadSpec\":{\"type\":\"hdfs\","
      + "\"path\":\"hdfs://cn105-10.l42scl.hortonworks.com:8020/apps/hive/warehouse/druid.db/"
      + ".hive-staging_hive_2016-11-04_19-23-50_168_1550339856804207572-1/_task_tmp.-ext-10002/_tmp.000000_0/"
      + "datasource2015/20150601T000000.000-0400_20150602T000000.000-0400/2016-11-04T19_24_01.732-04_00/0/"
      + "index.zip\"},\"dimensions\":\"dimension1\",\"metrics\":\"bigint\",\"shardSpec\":{\"type\":\"linear\","
      + "\"partitionNum\":0},\"binaryVersion\":9,\"size\":1765,\"identifier\":\"datasource2015_2015-06-01"
      + "T00:00:00.000-04:00_2015-06-02T00:00:00.000-04:00_2016-11-04T19:24:01.732-04:00\"}";
  DataSegment dataSegment = objectMapper.readerFor(DataSegment.class).readValue(segment);
  Assert.assertEquals("datasource2015", dataSegment.getDataSource());
}
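// A natural extension of this check (a sketch; the round trip relies only on the
// Jackson APIs already used above):
String reserialized = objectMapper.writeValueAsString(dataSegment);
DataSegment roundTripped = objectMapper.readerFor(DataSegment.class).readValue(reserialized);
Assert.assertEquals(dataSegment.getIdentifier(), roundTripped.getIdentifier());
Assert.assertEquals(dataSegment.getLoadSpec(), roundTripped.getLoadSpec());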
@Override
public Interval apply(DataSegment segment) {
  return segment.getInterval();
}
}
private void removeSegment(DataSegment segment) {
  totalSegmentSize -= segment.getSize();
  numSegments--;
}
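// The add-side counterpart is not shown in this excerpt; a plausible sketch of the
// matching bookkeeping (an assumption, mirroring removeSegment above):
private void addSegment(DataSegment segment) {
  totalSegmentSize += segment.getSize();
  numSegments++;
}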
// Existing segment in interval [100, 150): version "v0", partition 0 (leading
// arguments elided in this excerpt).
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0)));
DruidStorageHandlerUtils.publishSegmentsAndCommit(connector, metadataStorageTablesConfig,
// Segment to append: identical interval, version, and partition number.
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(0));
Path descriptorPath = ...; // right-hand side elided in this excerpt
// Appending keeps version "v0" and bumps the colliding shard to partition 1.
Assert.assertEquals("v0", persistedSegment.getVersion());
Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec);
Assert.assertEquals(1, persistedSegment.getShardSpec().getPartitionNum());
Assert.assertEquals(ImmutableMap.of("type", "hdfs", "path", expectedFinalHadoopPath.toString()),
    persistedSegment.getLoadSpec());
Assert.assertEquals("dummySegmentData",
    FileUtils.readFileToString(new File(expectedFinalHadoopPath.toUri())));
@VisibleForTesting
void deleteSegment(DataSegment segment) throws SegmentLoadingException {
  final Path path = getPath(segment);
  LOG.info("removing segment {}, located at path {}", segment.getIdentifier(), path);
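  // A plausible continuation (a sketch, not the original body): remove the segment's
  // partition directory on its FileSystem, treating an already-missing path as a no-op.
  // "getConf()" is assumed to be available on the enclosing class.
  try {
    final FileSystem fs = path.getFileSystem(getConf());
    if (!fs.exists(path)) {
      LOG.warn("segment path {} does not exist; nothing to delete", path);
      return;
    }
    // Layout is .../dataSource/interval/version/partitionNum/index.zip, so deleting
    // the parent directory removes the whole partition.
    if (!fs.delete(path.getParent(), true)) {
      throw new SegmentLoadingException("Unable to kill segment, failed to delete dir [%s]",
          path.getParent().toString());
    }
  } catch (IOException e) {
    throw new SegmentLoadingException(e, "Unable to kill segment [%s]", segment.getIdentifier());
  }
}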
// Existing segment in interval [100, 150): version "v0", partition 1 (leading
// arguments elided in this excerpt).
    new Interval(100, 150, DateTimeZone.UTC), "v0", new LinearShardSpec(1)));
HdfsDataSegmentPusherConfig pusherConfig = new HdfsDataSegmentPusherConfig();
pusherConfig.setStorageDirectory(config.get(String.valueOf(HiveConf.ConfVars.DRUID_SEGMENT_DIRECTORY)));
// Two segments to append under the newer version "v1": partitions 0 and 1.
    new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(0));
Path descriptorPath = ...; // right-hand side elided in this excerpt
    new Interval(100, 150, DateTimeZone.UTC), "v1", new LinearShardSpec(1));
Path segmentPath = ...; // right-hand side elided in this excerpt
// Appending keeps the existing version "v0" and continues its partition numbering.
Assert.assertEquals("v0", persistedSegment.getVersion());
Assert.assertTrue(persistedSegment.getShardSpec() instanceof LinearShardSpec);
Assert.assertEquals(2, persistedSegment.getShardSpec().getPartitionNum());
Path expectedFinalHadoopPath = new Path(dataSegmentPusher.getPathForHadoop(),
    dataSegmentPusher.makeIndexPathName(persistedSegment, DruidStorageHandlerUtils.INDEX_ZIP));
Assert.assertEquals(ImmutableMap.of("type", "hdfs", "path", expectedFinalHadoopPath.toString()),
    persistedSegment.getLoadSpec());
Assert.assertEquals("dummySegmentData",
    FileUtils.readFileToString(new File(expectedFinalHadoopPath.toUri())));
  deleteSegment(dataSegment);
} catch (SegmentLoadingException e) {
  LOG.error(String.format("Error while deleting segment [%s]", dataSegment.getIdentifier()), e);
}
@Override
public String apply(DataSegment dataSegment) {
  return dataSegment.getIdentifier();
}
};
@Override
public String apply(DataSegment segment) {
  return segment.getIdentifier();
}
}