@Override
public Observable<MarkedEdge> loadEdgeVersions( final SearchByEdge searchByEdge ) {

    // Stream every persisted version of the edge from storage.
    final Observable<MarkedEdge> versionStream = Observable.create(
        new ObservableIterator<MarkedEdge>( "getEdgeTypesFromSource" ) {
            @Override
            protected Iterator<MarkedEdge> getIterator() {
                return storageEdgeSerialization.getEdgeVersions( scope, searchByEdge );
            }
        } );

    // Page the stream and apply the marked-edge filtering requested by the search.
    final Observable<MarkedEdge> filtered = versionStream
        .buffer( graphFig.getScanPageSize() )
        .compose( new EdgeBufferFilter( searchByEdge.filterMarked() ) );

    // Record load latency via the shared timer.
    return ObservableTimer.time( filtered, loadEdgesVersionsTimer );
}
/**
 * Creates the listening executor used for graph shard audit tasks.  Worker count and
 * queue depth come from {@link GraphFig}; when the queue is full new tasks are rejected
 * (RejectionAction.ABORT).
 */
@Inject public AsyncTaskExecutorImpl(final GraphFig graphFig){ this.taskExecutor = MoreExecutors.listeningDecorator( TaskExecutorFactory .createTaskExecutor( "GraphTaskExecutor", graphFig.getShardAuditWorkerCount(), graphFig.getShardAuditWorkerQueueSize(), TaskExecutorFactory.RejectionAction.ABORT ) ); }
/**
 * Wires up the mocked {@link ApplicationScope} and {@link GraphFig} shared by the tests.
 * The min-delta is stubbed as 2.5x the cache timeout so it satisfies the allocator's
 * minimum-delta validation.
 */
@Before
public void setup() {
    scope = mock( ApplicationScope.class );

    // Application id returned by the mocked scope.
    Id orgId = mock( Id.class );
    when( orgId.getType() ).thenReturn( "organization" );
    when( orgId.getUuid() ).thenReturn( UUIDGenerator.newTimeUUID() );

    when( scope.getApplication() ).thenReturn( orgId );

    graphFig = mock( GraphFig.class );

    // Use the 'L' suffix for long literals — lowercase 'l' is easily misread as '1'.
    when( graphFig.getShardCacheSize() ).thenReturn( 10000L );
    when( graphFig.getShardSize() ).thenReturn( 20000L );

    final long timeout = 30000;
    when( graphFig.getShardCacheTimeout() ).thenReturn( timeout );
    // Must be >= 2.5x the cache timeout to pass the allocator's validation.
    when( graphFig.getShardMinDelta() ).thenReturn( ( long ) ( timeout * 2.5 ) );
}
/**
 * Returns the earliest timestamp considered safe for compaction: current time minus the
 * configured minimum delta.
 *
 * @throws GraphRuntimeException if the configured min delta is below the enforced
 *                               minimum of 2.5x the shard cache timeout
 */
@Override
public long getMinTime() {

    // The smallest allowed delta is 2.5x the shard cache timeout.
    final long minimumAllowed = ( long ) ( 2.5 * graphFig.getShardCacheTimeout() );

    final long minDelta = graphFig.getShardMinDelta();

    if ( minDelta < minimumAllowed ) {
        // Fix: message previously said ">= 2 x" while the check above enforces 2.5x.
        throw new GraphRuntimeException( String.format(
            "You must configure the property %s to be >= 2.5 x %s. Otherwise you risk losing data",
            GraphFig.SHARD_MIN_DELTA, GraphFig.SHARD_CACHE_TIMEOUT ) );
    }

    return timeService.getCurrentTime() - minDelta;
}
/** * This is a race condition. We could re-init the shard while another thread is reading it. This is fine, the read * doesn't have to be precise. The algorithm accounts for stale data. */ private void updateCache() { if ( this.refreshExecutors != null ) { this.refreshExecutors.shutdown(); } this.refreshExecutors = MoreExecutors .listeningDecorator( Executors.newScheduledThreadPool( graphFig.getShardCacheRefreshWorkerCount() ) ); this.graphs = CacheBuilder.newBuilder() //we want to asynchronously load new values for existing ones, that way we wont' have to //wait for a trip to cassandra .refreshAfterWrite( graphFig.getShardCacheTimeout(), TimeUnit.MILLISECONDS ) //set a static cache entry size here .maximumSize(graphFig.getShardCacheSize()) //set our shard loader //NOTE(review): shutdown() above does not await termination, so in-flight refresh tasks
//may still run briefly against the old cache — confirm this is acceptable given the
//stale-data tolerance documented in the method javadoc.
.build( new ShardCacheLoader() ); }
// Snapshot the configured shard size and the earliest time a shard may be acted on.
// NOTE(review): the extra 120000 ms pad's purpose isn't visible in this fragment — confirm against the enclosing method.
final long shardSize = graphFig.getShardSize(); final long minExecutionTime = graphFig.getShardMinDelta() + graphFig.getShardCacheTimeout() + 120000;
// Scan page size and smart-shard-seek flag, both sourced from GraphFig.
graphFig.getScanPageSize(), graphFig.getSmartShardSeekEnabled() );
/**
 * Builds a {@link GraphFig} mock stubbed with the cache size and timeout these tests use.
 *
 * @return the configured mock
 */
private GraphFig getFigMock() {
    final GraphFig graphFig = mock( GraphFig.class );
    // Use the 'L' suffix for long literals — lowercase 'l' is easily misread as '1'.
    when( graphFig.getShardCacheSize() ).thenReturn( 1000L );
    when( graphFig.getShardCacheTimeout() ).thenReturn( 30000L );

    return graphFig;
}
}
// Snapshot the configured shard size once for use below.
final long shardSize = graphFig.getShardSize();
/**
 * getMinTime() must return "now" minus the configured minimum delta
 * (stubbed in setup as 2.5x the shard cache timeout).
 */
@Test
public void minTime() {
    final ShardGroupCompaction shardGroupCompaction = mock( ShardGroupCompaction.class );
    final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class );
    final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class );
    final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class );
    final TimeService timeService = mock( TimeService.class );
    final NodeShardCache nodeShardCache = mock( NodeShardCache.class );

    NodeShardAllocation approximation = new NodeShardAllocationImpl(
        edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization,
        timeService, graphFig, shardGroupCompaction, nodeShardCache );

    // Pin "now" so the expected value is deterministic.
    final long now = System.currentTimeMillis();
    when( timeService.getCurrentTime() ).thenReturn( now );

    final long expected = ( long ) ( now - 2.5 * graphFig.getShardCacheTimeout() );

    final long returned = approximation.getMinTime();

    assertEquals( "Correct time was returned", expected, returned );
}
// Repair concurrency level, sourced from GraphFig.
graphFig.getRepairConcurrentSize() )
@Override public Iterator<ShardEntryGroup> getShards(final ApplicationScope scope, final DirectedEdgeMeta directedEdgeMeta) { ValidationUtils.validateApplicationScope( scope ); GraphValidation.validateDirectedEdgeMeta( directedEdgeMeta ); Iterator<Shard> existingShards; //its a new node, it doesn't need to check cassandra, it won't exist if ( isNewNode( directedEdgeMeta ) ) { existingShards = Collections.singleton( Shard.MIN_SHARD ).iterator(); } else { existingShards = edgeShardSerialization.getShardMetaData( scope, Optional.absent(), directedEdgeMeta ); /** * We didn't get anything out of cassandra, so we need to create the minimum shard */ if ( existingShards == null || !existingShards.hasNext() ) { final MutationBatch batch = edgeShardSerialization.writeShardMeta( scope, Shard.MIN_SHARD, directedEdgeMeta ); try { batch.execute(); } catch ( ConnectionException e ) { throw new RuntimeException( "Unable to connect to casandra", e ); } existingShards = Collections.singleton( Shard.MIN_SHARD ).iterator(); } } return new ShardEntryGroupIterator( existingShards, graphFig.getShardMinDelta(), shardGroupCompaction, scope, directedEdgeMeta ); }
/** * @param nodeShardAllocation * @param graphFig */ @Inject public NodeShardCacheImpl( final NodeShardAllocation nodeShardAllocation, final GraphFig graphFig) { Preconditions.checkNotNull( nodeShardAllocation, "nodeShardAllocation is required" ); Preconditions.checkNotNull( graphFig, "consistencyFig is required" ); this.nodeShardAllocation = nodeShardAllocation; this.graphFig = graphFig; /** * Add our listener to reconstruct the shard */ this.graphFig.addPropertyChangeListener( new PropertyChangeListener() { @Override public void propertyChange( final PropertyChangeEvent evt ) { final String propertyName = evt.getPropertyName(); if ( propertyName.equals( GraphFig.SHARD_CACHE_SIZE ) || propertyName .equals( GraphFig.SHARD_CACHE_TIMEOUT ) || propertyName .equals( GraphFig.SHARD_CACHE_REFRESH_WORKERS ) ) { updateCache(); } } } ); /** * Initialize the shard cache */ updateCache(); }
// Snapshot the configured shard size and the earliest allowed execution time (min delta + cache timeout).
final long shardSize = graphFig.getShardSize(); final long minExecutionTime = graphFig.getShardMinDelta() + graphFig.getShardCacheTimeout();
// NOTE(review): getShardMinDelta() is stubbed three times with different values; with
// Mockito the last stubbing wins — confirm these lines belong to separate test setups
// rather than a single method.
when( graphFig.getShardCacheTimeout() ).thenReturn( 30000l ); when( graphFig.getShardMinDelta() ).thenReturn( tooSmallDelta ); when( graphFig.getShardMinDelta() ).thenReturn( minDelta ); when( graphFig.getShardMinDelta() ).thenReturn( delta );
// Scan page size and smart-shard-seek flag, both sourced from GraphFig.
graphFig.getScanPageSize(), graphFig.getSmartShardSeekEnabled() );
@Test public void lowCountFutureShard() { final ShardGroupCompaction shardGroupCompaction = mock( ShardGroupCompaction.class ); final EdgeShardSerialization edgeShardSerialization = mock( EdgeShardSerialization.class ); final EdgeColumnFamilies edgeColumnFamilies = mock( EdgeColumnFamilies.class ); final ShardedEdgeSerialization shardedEdgeSerialization = mock( ShardedEdgeSerialization.class ); final TimeService timeService = mock( TimeService.class ); final NodeShardCache nodeShardCache = mock( NodeShardCache.class); NodeShardAllocation approximation = new NodeShardAllocationImpl( edgeShardSerialization, edgeColumnFamilies, shardedEdgeSerialization, timeService, graphFig, shardGroupCompaction, nodeShardCache ); final Id nodeId = IdGenerator.createId( "test" ); final String type = "type"; final String subType = "subType"; final long timeservicetime = System.currentTimeMillis(); when( timeService.getCurrentTime() ).thenReturn( timeservicetime ); final Shard futureShard = new Shard( 10000l, timeservicetime, true ); final ShardEntryGroup shardEntryGroup = new ShardEntryGroup( 1000l ); shardEntryGroup.addShard( futureShard ); final DirectedEdgeMeta targetEdgeMeta = DirectedEdgeMeta.fromSourceNodeTargetType( nodeId, type, subType ); //return a shard size < our max by 1 final long count = graphFig.getShardSize() - 1; final boolean result = approximation.auditShard( scope, shardEntryGroup, targetEdgeMeta ); assertFalse( "Shard allocated", result ); }
// A timestamp safely beyond the shard cache expiry window (now + 2x the cache timeout).
final long returnTime = System.currentTimeMillis() + graphFig.getShardCacheTimeout() * 2;
@Test public void cleanTargetMultipleEdgeBuffer() throws ConnectionException { final Id targetId = IdGenerator.createId( "target" ); final String edgeType = "test"; final int size = graphFig.getRepairConcurrentSize() * 2; Set<MarkedEdge> writtenEdges = new HashSet<MarkedEdge>(); for ( int i = 0; i < size; i++ ) { MarkedEdge edge = createEdge( IdGenerator.createId( "source" + i ), edgeType, targetId ); storageEdgeSerialization.writeEdge( scope, edge, UUIDGenerator.newTimeUUID() ).execute(); edgeMetadataSerialization.writeEdge( scope, edge ).execute(); writtenEdges.add( edge ); } long cleanupVersion = System.currentTimeMillis(); int value = edgeMetaRepair.repairTargets( scope, targetId, edgeType, cleanupVersion ).toBlocking() .single(); assertEquals( "No subtypes removed, edges exist", size, value ); //now delete the edge for ( MarkedEdge created : writtenEdges ) { storageEdgeSerialization.deleteEdge( scope, created, UUIDGenerator.newTimeUUID() ).execute(); } value = edgeMetaRepair.repairTargets( scope, targetId, edgeType, cleanupVersion ).toBlocking().last(); assertEquals( "Subtypes removed", 0, value ); //now verify they're gone Iterator<String> edgeTypes = edgeMetadataSerialization.getEdgeTypesToTarget( scope, new SimpleSearchEdgeType( targetId, null, null ) ); assertFalse( "No edge types exist", edgeTypes.hasNext() ); Iterator<String> sourceTypes = edgeMetadataSerialization .getIdTypesToTarget( scope, new SimpleSearchIdType( targetId, edgeType, null, null ) ); assertFalse( "No edge types exist", sourceTypes.hasNext() ); }
// Snapshot the configured shard size and the earliest allowed execution time (min delta + cache timeout).
final long shardSize = graphFig.getShardSize(); final long minExecutionTime = graphFig.getShardMinDelta() + graphFig.getShardCacheTimeout();