/**
 * After announcing segments and marking them all unused, querying the unused set for the
 * default segment's own interval must return every announced segment.
 */
@Test
public void testSimpleUnUsedList() throws IOException
{
  coordinator.announceHistoricalSegments(SEGMENTS);
  unUseSegment();

  final Set<DataSegment> unused = ImmutableSet.copyOf(
      coordinator.getUnusedSegmentsForInterval(
          defaultSegment.getDataSource(),
          defaultSegment.getInterval()
      )
  );
  Assert.assertEquals(SEGMENTS, unused);
}
/**
 * Outcome of an attempt to update datasource metadata in the metadata store.
 * Constant order is significant to callers relying on {@code ordinal()}; do not reorder.
 */
enum DataSourceMetadataUpdateResult
{
  /** The metadata update was applied. */
  SUCCESS,

  /** The update failed permanently; retrying will not help. */
  FAILURE,

  /** A transient failure; the caller should retry the transaction. */
  TRY_AGAIN
}
// Test-only override: counts every invocation in metadataUpdateCounter so tests can assert
// on how many metadata-update attempts (e.g. transaction retries) occurred, then delegates
// the actual work unchanged to the parent implementation. Trailing "};" closes the
// enclosing anonymous subclass declared outside this view.
@Override protected DataSourceMetadataUpdateResult updateDataSourceMetadataWithHandle( Handle handle, String dataSource, DataSourceMetadata startMetadata, DataSourceMetadata endMetadata ) throws IOException { // Count number of times this method is called. metadataUpdateCounter.getAndIncrement(); return super.updateDataSourceMetadataWithHandle(handle, dataSource, startMetadata, endMetadata); } };
/**
 * Exercises the datasource-metadata lifecycle: publish metadata via announce, read it back,
 * verify deleting a nonexistent datasource fails, delete the real entry, and confirm
 * subsequent reads return null.
 */
@Test
public void testDeleteDataSourceMetadata() throws IOException
{
  coordinator.announceHistoricalSegments(
      ImmutableSet.of(defaultSegment),
      new ObjectMetadata(null),
      new ObjectMetadata(ImmutableMap.of("foo", "bar"))
  );
  Assert.assertEquals(
      new ObjectMetadata(ImmutableMap.of("foo", "bar")),
      coordinator.getDataSourceMetadata("fooDataSource")
  );

  // Deleting a datasource with no stored metadata must report failure.
  Assert.assertFalse("deleteInvalidDataSourceMetadata", coordinator.deleteDataSourceMetadata("nonExistentDS"));
  // Deleting the existing entry succeeds...
  Assert.assertTrue("deleteValidDataSourceMetadata", coordinator.deleteDataSourceMetadata("fooDataSource"));
  // ...after which the metadata is gone.
  Assert.assertNull("getDataSourceMetadataNullAfterDelete", coordinator.getDataSourceMetadata("fooDataSource"));
}
/**
 * A query interval that begins just before the end of the segment interval (overlap at the
 * high end) must still match all used segments.
 */
@Test
public void testUsedOverlapHigh() throws IOException
{
  coordinator.announceHistoricalSegments(SEGMENTS);

  final Set<DataSegment> actual = ImmutableSet.copyOf(
      coordinator.getUsedSegmentsForInterval(
          defaultSegment.getDataSource(),
          Intervals.of("2015-1-1T23:59:59.999Z/2015-02-01T00Z")
      )
  );
  Assert.assertEquals(SEGMENTS, actual);
}
/**
 * Test-only override that records the segments actually published and counts down the
 * segment latch once per requested segment so tests can await publication.
 *
 * @param segments segments to announce
 * @return the segments actually added, as reported by the parent implementation
 * @throws IOException if the parent announce fails
 */
@Override
public Set<DataSegment> announceHistoricalSegments(Set<DataSegment> segments) throws IOException
{
  final Set<DataSegment> result = super.announceHistoricalSegments(segments);
  // assertNotNull states the intent directly instead of assertFalse(x == null).
  Assert.assertNotNull(
      "Segment latch not initialized, did you forget to call expectPublishSegments?",
      segmentLatch
  );
  publishedSegments.addAll(result);
  // Count down per *requested* segment (not per added segment), matching the latch size
  // set up by expectPublishSegments.
  segments.forEach(s -> segmentLatch.countDown());
  return result;
}
final CheckExistingSegmentIdResult result = checkAndGetExistingSegmentId( handle.createQuery( StringUtils.format( final SegmentIdWithShardSpec newIdentifier = createNewSegment(handle, dataSource, interval, maxVersion); if (newIdentifier == null) { return null; ); insertToMetastore( handle, newIdentifier,
final SegmentPublishResult result1 = coordinator.announceHistoricalSegments( ImmutableSet.of(defaultSegment), new ObjectMetadata(null), final SegmentPublishResult result2 = coordinator.announceHistoricalSegments( ImmutableSet.of(defaultSegment2), new ObjectMetadata(ImmutableMap.of("foo", "bar")), coordinator.getDataSourceMetadata("fooDataSource") );
@Test public void testMultiIntervalUsedList() throws IOException coordinator.announceHistoricalSegments(SEGMENTS); coordinator.announceHistoricalSegments(ImmutableSet.of(defaultSegment3)); coordinator.getUsedSegmentsForIntervals( defaultSegment.getDataSource(), ImmutableList.of(defaultSegment.getInterval()) ImmutableSet.of(defaultSegment3), ImmutableSet.copyOf( coordinator.getUsedSegmentsForIntervals( defaultSegment.getDataSource(), ImmutableList.of(defaultSegment3.getInterval()) ImmutableSet.of(defaultSegment, defaultSegment2, defaultSegment3), ImmutableSet.copyOf( coordinator.getUsedSegmentsForIntervals( defaultSegment.getDataSource(), ImmutableList.of(defaultSegment.getInterval(), defaultSegment3.getInterval()) coordinator.getUsedSegmentsForIntervals( defaultSegment.getDataSource(), ImmutableList.of(
final SegmentIdWithShardSpec identifier = coordinator.allocatePendingSegment( dataSource, "seq", final SegmentIdWithShardSpec identifier = coordinator.allocatePendingSegment( dataSource, "seq", final int numDeleted = coordinator.deletePendingSegments(dataSource, new Interval(begin, secondBegin)); Assert.assertEquals(10, numDeleted);
final SegmentIdWithShardSpec identifier = coordinator.allocatePendingSegment( dataSource, "seq", final SegmentIdWithShardSpec identifier1 = coordinator.allocatePendingSegment( dataSource, "seq", final SegmentIdWithShardSpec identifier2 = coordinator.allocatePendingSegment( dataSource, "seq", final SegmentIdWithShardSpec identifier3 = coordinator.allocatePendingSegment( dataSource, "seq", Assert.assertEquals(identifier2, identifier3); final SegmentIdWithShardSpec identifier4 = coordinator.allocatePendingSegment( dataSource, "seq1",
/**
 * Wires up the test fixture: creates the task tables on the Derby connector, then builds
 * the task storage, the SQL-backed storage coordinator, and the lockbox under test.
 */
@Before
public void setUp()
{
  final SQLMetadataConnector connector = derbyConnectorRule.getConnector();
  connector.createTaskTables();
  taskStorage = new HeapMemoryTaskStorage(new TaskStorageConfig(null));
  storageCoordinator = new IndexerSQLMetadataStorageCoordinator(
      objectMapper,
      derbyConnectorRule.metadataTablesConfigSupplier().get(),
      // Reuse the connector we already fetched (rather than calling getConnector() again),
      // making explicit that the coordinator uses the same store whose tables were created.
      connector
  );
  lockbox = new TaskLockbox(taskStorage);
}
/**
 * Announced segments must all be reported as used when queried with the default segment's
 * own datasource and interval.
 */
@Test
public void testSimpleUsedList() throws IOException
{
  coordinator.announceHistoricalSegments(SEGMENTS);

  final Set<DataSegment> used = ImmutableSet.copyOf(
      coordinator.getUsedSegmentsForInterval(
          defaultSegment.getDataSource(),
          defaultSegment.getInterval()
      )
  );
  Assert.assertEquals(SEGMENTS, used);
}
/** * Attempts to insert a set of segments to the database. Returns the set of segments actually added (segments * with identifiers already in the database will not be added). * * @param segments set of segments to add * * @return set of segments actually added */ @Override public Set<DataSegment> announceHistoricalSegments(final Set<DataSegment> segments) throws IOException { final SegmentPublishResult result = announceHistoricalSegments(segments, null, null); // Metadata transaction cannot fail because we are not trying to do one. if (!result.isSuccess()) { throw new ISE("WTF?! announceHistoricalSegments failed with null metadata, should not happen."); } return result.getSegments(); }
) throws IOException final CheckExistingSegmentIdResult result = checkAndGetExistingSegmentId( handle.createQuery( StringUtils.format( final SegmentIdWithShardSpec newIdentifier = createNewSegment(handle, dataSource, interval, maxVersion); if (newIdentifier == null) { return null; insertToMetastore(handle, newIdentifier, dataSource, interval, "", sequenceName, sequenceNamePrevIdSha1);
final SegmentPublishResult result1 = failOnceCoordinator.announceHistoricalSegments( ImmutableSet.of(defaultSegment), new ObjectMetadata(null), final SegmentPublishResult result2 = failOnceCoordinator.announceHistoricalSegments( ImmutableSet.of(defaultSegment2), new ObjectMetadata(ImmutableMap.of("foo", "bar")), failOnceCoordinator.getDataSourceMetadata("fooDataSource") );
); final ObjectMapper objectMapper = new TestUtils().getTestObjectMapper(); metadataStorageCoordinator = new IndexerSQLMetadataStorageCoordinator( objectMapper, metadataStorageTablesConfig,
/**
 * A very wide query interval ("2000/2999") that fully contains the segment intervals must
 * return every unused segment.
 */
@Test
public void testUnUsedBigOverlap() throws IOException
{
  coordinator.announceHistoricalSegments(SEGMENTS);
  unUseSegment();

  final Set<DataSegment> unused = ImmutableSet.copyOf(
      coordinator.getUnusedSegmentsForInterval(
          defaultSegment.getDataSource(),
          Intervals.of("2000/2999")
      )
  );
  Assert.assertEquals(SEGMENTS, unused);
}
@Test public void testUsedOverlapLow() throws IOException { coordinator.announceHistoricalSegments(SEGMENTS); Set<DataSegment> actualSegments = ImmutableSet.copyOf( coordinator.getUsedSegmentsForInterval( defaultSegment.getDataSource(), Intervals.of("2014-12-31T23:59:59.999Z/2015-01-01T00:00:00.001Z") // end is exclusive ) ); Assert.assertEquals( SEGMENTS, actualSegments ); }
// Test-only override of the transactional announce: records the segments actually published
// and counts down the segment latch once per published segment so tests can await the
// publish. Fails fast if the latch was never set up via expectPublishSegments. Trailing
// "};" closes the enclosing anonymous subclass declared outside this view.
@Override public SegmentPublishResult announceHistoricalSegments( Set<DataSegment> segments, DataSourceMetadata startMetadata, DataSourceMetadata endMetadata ) throws IOException { SegmentPublishResult result = super.announceHistoricalSegments(segments, startMetadata, endMetadata); Assert.assertFalse( "Segment latch not initialized, did you forget to call expectPublishSegments?", segmentLatch == null ); publishedSegments.addAll(result.getSegments()); result.getSegments().forEach(s -> segmentLatch.countDown()); return result; } };