/**
 * Checks whether an edge of the given connection type exists from the head entity
 * to the supplied target entity.
 *
 * @param connectionType the connection (edge) type to look for
 * @param entity         the candidate connection target
 * @return true when at least one edge version exists, false otherwise
 */
@Override
public boolean isConnectionMember( String connectionType, EntityRef entity ) throws Exception {

    final Id targetId = new SimpleId( entity.getUuid(), entity.getType() );
    final Id sourceId = new SimpleId( headEntity.getUuid(), headEntity.getType() );

    if (logger.isTraceEnabled()) {
        logger.trace("isConnectionMember(): Checking for edge type {} from {}:{} to {}:{}",
            connectionType, headEntity.getType(), headEntity.getUuid(), entity.getType(), entity.getUuid() );
    }

    final GraphManager gm = managerCache.getGraphManager( applicationScope );

    // Only existence matters here, so blocking on the first emitted version (or null
    // when the stream is empty) is sufficient.
    final Observable<MarkedEdge> edgeVersions = gm.loadEdgeVersions(
        CpNamingUtils.createEdgeFromConnectionType( sourceId, connectionType, targetId ) );

    return edgeVersions.toBlocking().firstOrDefault( null ) != null;
}
/**
 * Checks whether the supplied entity is a member of the named collection on the
 * head entity.
 *
 * @param collectionName the collection whose membership is being tested
 * @param entity         the candidate collection member
 * @return true when at least one edge version exists, false otherwise
 */
@SuppressWarnings( "unchecked" )
@Override
public boolean isCollectionMember( String collectionName, EntityRef entity ) throws Exception {

    final Id memberId = new SimpleId( entity.getUuid(), entity.getType() );
    final Id ownerId = new SimpleId( headEntity.getUuid(), headEntity.getType() );

    if (logger.isTraceEnabled()) {
        logger.trace("isCollectionMember(): Checking for edge type {} from {}:{} to {}:{}",
            collectionName, headEntity.getType(), headEntity.getUuid(), entity.getType(), entity.getUuid() );
    }

    final GraphManager gm = managerCache.getGraphManager( applicationScope );

    // Only existence matters here, so blocking on the first emitted version (or null
    // when the stream is empty) is sufficient.
    final Observable<MarkedEdge> edgeVersions = gm.loadEdgeVersions(
        CpNamingUtils.createEdgeFromCollectionName( ownerId, collectionName, memberId ) );

    return edgeVersions.toBlocking().firstOrDefault( null ) != null;
}
/**
 * Removes the given item from the named collection on the head entity by marking the
 * connecting edge(s) and queueing an asynchronous delete/de-index event.
 *
 * @param collectionName the collection to remove the item from
 * @param itemRef        reference to the item entity being removed
 * @throws Exception propagated from the underlying graph operations
 */
@Override
public void removeItemFromCollection( String collectionName, EntityRef itemRef ) throws Exception {

    Id entityId = new SimpleId( itemRef.getUuid(), itemRef.getType() );

    // remove edge from collection to item
    GraphManager gm = managerCache.getGraphManager( applicationScope );

    // mark the edge versions and take the first for later delete edge queue event ( load is descending )
    final Edge markedSourceEdge = gm.loadEdgeVersions(
        CpNamingUtils.createEdgeFromCollectionName( cpHeadEntity.getId(), collectionName, entityId ) )
        .flatMap(edge -> gm.markEdge(edge)).toBlocking().firstOrDefault(null);

    Edge markedReversedEdge = null;
    // Only linked collections maintain a reverse edge that must also be marked.
    CollectionInfo collection = getDefaultSchema().getCollection( headEntity.getType(), collectionName );
    if (collection != null && collection.getLinkedCollection() != null) {
        // delete reverse edges
        final String pluralType = InflectionUtils.pluralize( cpHeadEntity.getId().getType() );
        markedReversedEdge = gm.loadEdgeVersions(
            CpNamingUtils.createEdgeFromCollectionName( entityId, pluralType, cpHeadEntity.getId() ) )
            .flatMap(reverseEdge -> gm.markEdge(reverseEdge)).toBlocking().firstOrDefault(null);
    }

    /**
     * Remove from the index. This will call gm.deleteEdge which also deletes the reverse edge(s) and de-indexes
     * older versions of the edge(s).
     *
     */
    // Null checks guard against the case where loadEdgeVersions emitted nothing
    // (no edge existed), in which case there is nothing to queue.
    if( markedSourceEdge != null ) {
        indexService.queueDeleteEdge(applicationScope, markedSourceEdge);
    }
    if( markedReversedEdge != null ){
        indexService.queueDeleteEdge(applicationScope, markedReversedEdge);
    }
}
// NOTE(review): fragment — the statement below closes an anonymous class/lambda whose
// opening is outside this view; it blocks for the first emission, defaulting to true
// when the observable completes empty.
}).toBlocking().firstOrDefault(true);

// Block until a log entry in DELETED state is observed for this entity (walking
// versions from max down to min), or until the stream completes with no match.
ecm.getVersionsFromMaxToMin( entityId, UUIDUtils.newTimeUUID() )
    .toBlocking()
    .firstOrDefault( null, mvccLogEntry -> mvccLogEntry.getState() == MvccLogEntry.State.DELETED );
// Mark the edge found by the connection search; null when nothing was emitted.
final Edge toBeMarkedEdge = graphManager.markEdge( connectionSearch ).toBlocking().firstOrDefault( null );
// NOTE(review): deleteEdge is invoked even when toBeMarkedEdge is null — confirm the
// graph manager tolerates a null edge argument.
final Edge toBeDeletedEdge = graphManager.deleteEdge( toBeMarkedEdge ).toBlocking().firstOrDefault( null );
// Mark the edge found by the connection search; null when nothing was emitted.
final Edge toBeMarkedEdge = graphManager.markEdge( connectionSearch ).toBlocking().firstOrDefault( null );
// NOTE(review): unlike the sibling call site that uses firstOrDefault(null), first()
// throws NoSuchElementException when the delete emits nothing — confirm this is an
// intentional assertion (e.g. in test code) and not an inconsistency.
final Edge toBeDeletedEdge = graphManager.deleteEdge( toBeMarkedEdge ).toBlocking().first();
// Mark the original user's entity and block until the mark completes (result is
// discarded; firstOrDefault(null) simply waits out the observable).
manager.mark( originalUser.getId(), null ).toBlocking().firstOrDefault( null );
@Override
public void run() {
    // Subscribes on a new thread to an observable that completes without emitting,
    // so firstOrDefault yields the fallback 5000; toBlocking() makes run() wait for
    // the completion before logging.
    Integer i = Observable.create(new Observable.OnSubscribe<Integer>() {
        @Override
        public void call(Subscriber<? super Integer> subscriber) {
            // Complete immediately without emitting any item.
            subscriber.onCompleted();
        }
    }).subscribeOn(Schedulers.newThread()).toBlocking().firstOrDefault(5000);
    log(i);
}
}); // NOTE(review): closes an enclosing anonymous class whose opening is outside this view
/**
 * Fetches the available workflow transitions for the given JIRA issue, blocking
 * until the endpoint responds.
 *
 * @param issue the issue key or id
 * @return the transitions response, or null when the endpoint emits nothing
 */
public TransitionResponse getTransitionsOfIssue(String issue) {
    return jiraEndPoints
            .getTransitionsOfIssue(issue)
            .toBlocking()
            .firstOrDefault(null);
}
/**
 * Looks up an internal user by username (matched in lower case).
 *
 * @param id the username to look up
 * @return the populated user, or absent when no matching node exists
 */
public Optional<User> getInternalUser(final String id) {
    final String username = id.toLowerCase();
    final String q = "match (u:User) where u.username={username} return u.username, 'dummy' as dummy";

    final JsonNode node = neo4j.execCypher(q, "username", username)
            .toBlocking().firstOrDefault(null);

    // No node emitted means no such user exists.
    if (node == null) {
        return Optional.absent();
    }

    final User user = new User();
    user.username = node.get("u.username").asText();
    user.roles = ImmutableList.copyOf(findRolesForUser(username));
    return Optional.of(user);
}
// NOTE(review): fragment — `n` is populated here but the query below passes the
// username as varargs instead; confirm whether the `n` map is still consumed
// somewhere outside this view.
n.put("username", username);

// Block for the first node matching this username; null when there is no match.
JsonNode userNode = neo4j.execCypher(q, "username", username)
        .toBlocking().firstOrDefault(null);

if (userNode != null) {
/**
 * Starts registry auditing after construction: resolves this server's identity
 * (blocking), then subscribes to the full-registry interest stream and writes one
 * audit record per change notification.
 */
@PostConstruct
public void startRegistryAuditing() {
    // Blocks for the first resolved identity; null when the stream completes empty,
    // in which case records are written with a null server id.
    InstanceInfo auditServer = serverIdentity.resolve().toBlocking().firstOrDefault(null);
    final String auditServerId = auditServer == null ? null : auditServer.getId();
    // TODO: this should be only Origin.Local, but since bridge works on replication channel we would not audit eureka 1.0 entries.
    registry.forInterest(Interests.forFullRegistry()).subscribe(new Subscriber<ChangeNotification<InstanceInfo>>() {
        @Override
        public void onCompleted() {
            logger.warn("Registry auditing finished");
        }

        @Override
        public void onError(Throwable e) {
            logger.warn("Registry auditing finished due to an error in interest channel subscription", e);
        }

        @Override
        public void onNext(ChangeNotification<InstanceInfo> notification) {
            // Records are stamped at receipt time.
            // NOTE(review): the meaning of the `false` argument is not visible here —
            // confirm against AuditRecords.forChangeNotification.
            AuditRecord record = AuditRecords.forChangeNotification(auditServerId, System.currentTimeMillis(), false, notification);
            auditService.write(record);
        }
    });
}
} // NOTE(review): closes the enclosing class (declaration outside this view)
private void maybeScheduleTempDataCompressor(List<JobDetails> backgroundJobs) { Configuration config = configurationService.load(TEMP_DATA_COMPRESSOR_CONFIG_ID).toBlocking() .firstOrDefault(new Configuration(TEMP_DATA_COMPRESSOR_CONFIG_ID, new HashMap<>())); if (config.get("jobId") == null) { logger.info("Preparing to create and schedule " + TEMP_DATA_COMPRESSOR_JOB + " job"); // Get next start of odd hour long nextStart = LocalDateTime.now(ZoneOffset.UTC) .with(DateTimeService.startOfNextOddHour()) .toInstant(ZoneOffset.UTC).toEpochMilli(); // Temp table processing JobDetails jobDetails = scheduler.scheduleJob(TEMP_DATA_COMPRESSOR_JOB, TEMP_DATA_COMPRESSOR_JOB, ImmutableMap.of(), new RepeatingTrigger.Builder().withTriggerTime(nextStart) .withInterval(2, TimeUnit.HOURS).build()).toBlocking().value(); backgroundJobs.add(jobDetails); configurationService.save(TEMP_DATA_COMPRESSOR_CONFIG_ID, "jobId", jobDetails.getJobId().toString()) .toBlocking(); logger.info("Created and scheduled " + jobDetails); } }
/**
 * Creates and schedules the temporary-table creator job on first run. The persisted
 * configuration acts as a guard: when a jobId is already stored, the job was
 * scheduled previously and nothing is done.
 *
 * @param backgroundJobs accumulator the newly scheduled job is appended to
 */
private void maybeScheduleTableCreator(List<JobDetails> backgroundJobs) {
    Configuration config = configurationService.load(TEMP_TABLE_CREATE_CONFIG_ID).toBlocking()
            .firstOrDefault(new Configuration(TEMP_TABLE_CREATE_CONFIG_ID, new HashMap<>()));
    if (config.get("jobId") == null) {
        // First trigger at the next whole minute + 2, then every 2 hours.
        long nextTrigger = LocalDateTime.now(ZoneOffset.UTC)
                .truncatedTo(ChronoUnit.MINUTES).plusMinutes(2)
                .toInstant(ZoneOffset.UTC).toEpochMilli();

        JobDetails jobDetails = scheduler.scheduleJob(TEMP_TABLE_CREATOR_JOB, TEMP_TABLE_CREATOR_JOB,
                ImmutableMap.of(), new RepeatingTrigger.Builder().withTriggerTime(nextTrigger)
                        .withInterval(2, TimeUnit.HOURS).build()).toBlocking().value();
        backgroundJobs.add(jobDetails);

        // BUG FIX: a bare toBlocking() only wraps the observable without subscribing,
        // so the jobId was never guaranteed to be persisted. Block until the save
        // actually completes.
        configurationService.save(TEMP_TABLE_CREATE_CONFIG_ID, "jobId", jobDetails.getJobId().toString())
                .toBlocking().lastOrDefault(null);

        logger.info("Scheduled temporary table creator " + jobDetails);
    }
}
/**
 * Unschedules the legacy CompressData job left over from a previous installation.
 * Loads the persisted job configuration; when present, unschedules by the stored
 * jobId (falling back to unschedule-by-name when the id is missing) and deletes the
 * configuration entry afterwards.
 */
private void unscheduleCompressData() {
    Configuration config = configurationService.load(COMPRESS_DATA_CONFIG_ID).toBlocking()
            .firstOrDefault(new Configuration(COMPRESS_DATA_CONFIG_ID, new HashMap<>()));
    String jobId = config.get("jobId");
    if (config.getProperties().isEmpty()) {
        // This means we have a new installation and not an upgrade. The CompressData job has not been previously
        // installed so there is no db clean up necessary.
    } else {
        Completable unscheduled;
        if (jobId == null) {
            logger.info("Expected to find a jobId property in database for {}. Attempting to unschedule job by " +
                    "name.", COMPRESS_DATA_JOB);
            unscheduled = scheduler.unscheduleJobByTypeAndName(COMPRESS_DATA_JOB, COMPRESS_DATA_JOB);
        } else {
            unscheduled = scheduler.unscheduleJobById(jobId);
        }
        // Block until the unschedule operation has finished before touching the config.
        unscheduled.await();
        // NOTE(review): this branch already implies a non-empty properties map, so the
        // condition below is always true here — confirm whether it is intentional.
        if (!config.getProperties().isEmpty()) {
            configurationService.delete(COMPRESS_DATA_CONFIG_ID).await();
        }
    }
}
// NOTE(review): three identical garbled fragments follow. In each, the orphaned
// `.toMap(Tuple2::_1, Tuple2::_2);` after a semicolon cannot parse — it most likely
// belongs to the pipeline that produced `optionalModelMapObservable` and appears to
// have been separated during extraction. Restore it to its pipeline before compiling.

// Block for the first response; null when the observable completes empty.
final Response response = responseObservable.toBlocking().firstOrDefault(null);
        .toMap(Tuple2::_1, Tuple2::_2);
// Wrap the emitted map in Optional so an empty stream yields Optional.empty().
final Optional<Map<String, Model>> optionalModelMap = optionalModelMapObservable.map(Optional::of).toBlocking()
        .firstOrDefault(Optional.empty());

final Response response = responseObservable.toBlocking().firstOrDefault(null);
        .toMap(Tuple2::_1, Tuple2::_2);
final Optional<Map<String, Model>> optionalModelMap = optionalModelMapObservable.map(Optional::of).toBlocking()
        .firstOrDefault(Optional.empty());

final Response response = responseObservable.toBlocking().firstOrDefault(null);
        .toMap(Tuple2::_1, Tuple2::_2);
final Optional<Map<String, Model>> optionalModelMap = optionalModelMapObservable.map(Optional::of).toBlocking()
        .firstOrDefault(Optional.empty());