/**
 * Supplies a fresh SQL-backed segment publisher wired to this test's
 * JSON mapper, table config, and Derby connector.
 */
@Override
public MetadataSegmentPublisher get()
{
  final SQLMetadataSegmentPublisher segmentPublisher =
      new SQLMetadataSegmentPublisher(jsonMapper, config, connector);
  return segmentPublisher;
}
}
/**
 * Builds the manager and publisher against a shared in-memory Derby
 * connector, creates the segments table, and seeds two fixture segments.
 */
@Before
public void setUp() throws Exception
{
  // One connector backs both the manager under test and the publisher.
  final TestDerbyConnector derbyConnector = derbyConnectorRule.getConnector();

  manager = new SQLMetadataSegmentManager(
      jsonMapper,
      Suppliers.ofInstance(new MetadataSegmentManagerConfig()),
      derbyConnectorRule.metadataTablesConfigSupplier(),
      derbyConnector
  );

  publisher = new SQLMetadataSegmentPublisher(
      jsonMapper,
      derbyConnectorRule.metadataTablesConfigSupplier().get(),
      derbyConnector
  );

  // The table must exist before any segment rows can be inserted.
  derbyConnector.createSegmentTable();

  publisher.publishSegment(segment1);
  publisher.publishSegment(segment2);
}
/**
 * Publishes the given segment's metadata row to the segments table.
 *
 * @param segment the segment whose metadata should be recorded
 * @throws IOException if the segment cannot be serialized to JSON
 */
@Override
public void publishSegment(final DataSegment segment) throws IOException
{
  publishSegment(
      segment.getId().toString(),
      segment.getDataSource(),
      DateTimes.nowUtc().toString(),
      segment.getInterval().getStart().toString(),
      segment.getInterval().getEnd().toString(),
      // Partitioned iff there is a real shard spec; NoneShardSpec means "not partitioned".
      // (Was the redundant ternary `(… instanceof NoneShardSpec) ? false : true`.)
      !(segment.getShardSpec() instanceof NoneShardSpec),
      segment.getVersion(),
      true, // used = true: a newly published segment is immediately available
      jsonMapper.writeValueAsBytes(segment)
  );
}
/**
 * Verifies that a datasource can be removed from the metadata store even
 * though the manager has not yet polled the segment belonging to it.
 */
@Test
public void testRemoveDataSource() throws IOException
{
  manager.start();
  manager.poll();
  Assert.assertTrue(manager.isStarted());

  // Publish a segment for a datasource the manager has not polled yet.
  final String unpolledDataSource = "wikipedia2";
  final DataSegment unpolledSegment = new DataSegment(
      unpolledDataSource,
      Intervals.of("2017-10-15T00:00:00.000/2017-10-16T00:00:00.000"),
      "2017-10-15T20:19:12.565Z",
      ImmutableMap.of(
          "type", "s3_zip",
          "bucket", "test",
          "key", "wikipedia2/index/y=2017/m=10/d=15/2017-10-16T20:19:12.565Z/0/index.zip"
      ),
      ImmutableList.of("dim1", "dim2", "dim3"),
      ImmutableList.of("count", "value"),
      NoneShardSpec.instance(),
      0,
      1234L
  );
  publisher.publishSegment(unpolledSegment);

  // The manager only learns about segments on poll(), so it is still unknown here...
  Assert.assertNull(manager.getDataSource(unpolledDataSource));
  // ...yet removal operates directly against the metadata store and succeeds.
  Assert.assertTrue(manager.removeDataSource(unpolledDataSource));
}
/**
 * Verifies that an individual segment can be removed from the metadata
 * store even though the manager has not yet polled it.
 */
@Test
public void testRemoveDataSegment() throws IOException
{
  manager.start();
  manager.poll();
  Assert.assertTrue(manager.isStarted());

  // Publish a segment for a datasource the manager has not polled yet.
  final String unpolledDataSource = "wikipedia2";
  final DataSegment unpolledSegment = new DataSegment(
      unpolledDataSource,
      Intervals.of("2017-10-15T00:00:00.000/2017-10-16T00:00:00.000"),
      "2017-10-15T20:19:12.565Z",
      ImmutableMap.of(
          "type", "s3_zip",
          "bucket", "test",
          "key", "wikipedia2/index/y=2017/m=10/d=15/2017-10-16T20:19:12.565Z/0/index.zip"
      ),
      ImmutableList.of("dim1", "dim2", "dim3"),
      ImmutableList.of("count", "value"),
      NoneShardSpec.instance(),
      0,
      1234L
  );
  publisher.publishSegment(unpolledSegment);

  // Not visible to the manager until the next poll()...
  Assert.assertNull(manager.getDataSource(unpolledDataSource));
  // ...but removal by segment id works directly against the metadata store.
  Assert.assertTrue(manager.removeSegment(unpolledSegment.getId()));
}
/**
 * Creates the publisher under test from the supplied mapper, config,
 * and connector.
 */
@Override
public MetadataSegmentPublisher get()
{
  return new SQLMetadataSegmentPublisher(
      jsonMapper,
      config,
      connector
  );
}
}
@Test public void testPollWithCurroptedSegment() { //create a corrupted segment entry in segments table, which tests //that overall loading of segments from database continues to work //even in one of the entries are corrupted. publisher.publishSegment( "corrupt-segment-id", "corrupt-datasource", "corrupt-create-date", "corrupt-start-date", "corrupt-end-date", true, "corrupt-version", true, StringUtils.toUtf8("corrupt-payload") ); EmittingLogger.registerEmitter(new NoopServiceEmitter()); manager.start(); manager.poll(); Assert.assertTrue(manager.isStarted()); Assert.assertEquals( "wikipedia", Iterables.getOnlyElement(manager.getDataSources()).getName() ); }
/**
 * Publishes the given segment's metadata row to the segments table.
 *
 * @param segment the segment whose metadata should be recorded
 * @throws IOException if the segment cannot be serialized to JSON
 */
@Override
public void publishSegment(final DataSegment segment) throws IOException
{
  publishSegment(
      segment.getIdentifier(),
      segment.getDataSource(),
      DateTimes.nowUtc().toString(),
      segment.getInterval().getStart().toString(),
      segment.getInterval().getEnd().toString(),
      // Partitioned iff there is a real shard spec; NoneShardSpec means "not partitioned".
      // (Was the redundant ternary `(… instanceof NoneShardSpec) ? false : true`.)
      !(segment.getShardSpec() instanceof NoneShardSpec),
      segment.getVersion(),
      true, // used = true: a newly published segment is immediately available
      jsonMapper.writeValueAsBytes(segment)
  );
}